grsecurity-2.2.2-2.6.32.46-201108292233.patch
1diff -urNp linux-2.6.32.46/arch/alpha/include/asm/elf.h linux-2.6.32.46/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.46/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.46/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.32.46/arch/alpha/include/asm/pgtable.h linux-2.6.32.46/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.46/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.46/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.46/arch/alpha/kernel/module.c linux-2.6.32.46/arch/alpha/kernel/module.c
40--- linux-2.6.32.46/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.46/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.46/arch/alpha/kernel/osf_sys.c linux-2.6.32.46/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.46/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53+++ linux-2.6.32.46/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.32.46/arch/alpha/mm/fault.c linux-2.6.32.46/arch/alpha/mm/fault.c
86--- linux-2.6.32.46/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87+++ linux-2.6.32.46/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.32.46/arch/arm/include/asm/elf.h linux-2.6.32.46/arch/arm/include/asm/elf.h
245--- linux-2.6.32.46/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246+++ linux-2.6.32.46/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263diff -urNp linux-2.6.32.46/arch/arm/include/asm/kmap_types.h linux-2.6.32.46/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.46/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265+++ linux-2.6.32.46/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.46/arch/arm/include/asm/uaccess.h linux-2.6.32.46/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.46/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276+++ linux-2.6.32.46/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277@@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281+extern void check_object_size(const void *ptr, unsigned long n, bool to);
282+
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286@@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294+
295+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296+{
297+ if (!__builtin_constant_p(n))
298+ check_object_size(to, n, false);
299+ return ___copy_from_user(to, from, n);
300+}
301+
302+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303+{
304+ if (!__builtin_constant_p(n))
305+ check_object_size(from, n, true);
306+ return ___copy_to_user(to, from, n);
307+}
308+
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316+ if ((long)n < 0)
317+ return n;
318+
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322@@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326+ if ((long)n < 0)
327+ return n;
328+
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
332diff -urNp linux-2.6.32.46/arch/arm/kernel/armksyms.c linux-2.6.32.46/arch/arm/kernel/armksyms.c
333--- linux-2.6.32.46/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334+++ linux-2.6.32.46/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339-EXPORT_SYMBOL(__copy_from_user);
340-EXPORT_SYMBOL(__copy_to_user);
341+EXPORT_SYMBOL(___copy_from_user);
342+EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346diff -urNp linux-2.6.32.46/arch/arm/kernel/kgdb.c linux-2.6.32.46/arch/arm/kernel/kgdb.c
347--- linux-2.6.32.46/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348+++ linux-2.6.32.46/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353-struct kgdb_arch arch_kgdb_ops = {
354+const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358diff -urNp linux-2.6.32.46/arch/arm/kernel/traps.c linux-2.6.32.46/arch/arm/kernel/traps.c
359--- linux-2.6.32.46/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360+++ linux-2.6.32.46/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361@@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365+extern void gr_handle_kernel_exploit(void);
366+
367 /*
368 * This function is protected against re-entrancy.
369 */
370@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374+ gr_handle_kernel_exploit();
375+
376 do_exit(SIGSEGV);
377 }
378
379diff -urNp linux-2.6.32.46/arch/arm/lib/copy_from_user.S linux-2.6.32.46/arch/arm/lib/copy_from_user.S
380--- linux-2.6.32.46/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381+++ linux-2.6.32.46/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382@@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386- * size_t __copy_from_user(void *to, const void *from, size_t n)
387+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391@@ -84,11 +84,11 @@
392
393 .text
394
395-ENTRY(__copy_from_user)
396+ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400-ENDPROC(__copy_from_user)
401+ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405diff -urNp linux-2.6.32.46/arch/arm/lib/copy_to_user.S linux-2.6.32.46/arch/arm/lib/copy_to_user.S
406--- linux-2.6.32.46/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407+++ linux-2.6.32.46/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408@@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412- * size_t __copy_to_user(void *to, const void *from, size_t n)
413+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417@@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421-WEAK(__copy_to_user)
422+WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426-ENDPROC(__copy_to_user)
427+ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431diff -urNp linux-2.6.32.46/arch/arm/lib/uaccess.S linux-2.6.32.46/arch/arm/lib/uaccess.S
432--- linux-2.6.32.46/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.46/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434@@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447-ENTRY(__copy_to_user)
448+ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456-ENDPROC(__copy_to_user)
457+ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473-ENTRY(__copy_from_user)
474+ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482-ENDPROC(__copy_from_user)
483+ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487diff -urNp linux-2.6.32.46/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.46/arch/arm/lib/uaccess_with_memcpy.c
488--- linux-2.6.32.46/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489+++ linux-2.6.32.46/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490@@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494-__copy_to_user(void __user *to, const void *from, unsigned long n)
495+___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499diff -urNp linux-2.6.32.46/arch/arm/mach-at91/pm.c linux-2.6.32.46/arch/arm/mach-at91/pm.c
500--- linux-2.6.32.46/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501+++ linux-2.6.32.46/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502@@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506-static struct platform_suspend_ops at91_pm_ops ={
507+static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
511diff -urNp linux-2.6.32.46/arch/arm/mach-omap1/pm.c linux-2.6.32.46/arch/arm/mach-omap1/pm.c
512--- linux-2.6.32.46/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513+++ linux-2.6.32.46/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518-static struct platform_suspend_ops omap_pm_ops ={
519+static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523diff -urNp linux-2.6.32.46/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.46/arch/arm/mach-omap2/pm24xx.c
524--- linux-2.6.32.46/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525+++ linux-2.6.32.46/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530-static struct platform_suspend_ops omap_pm_ops = {
531+static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535diff -urNp linux-2.6.32.46/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.46/arch/arm/mach-omap2/pm34xx.c
536--- linux-2.6.32.46/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537+++ linux-2.6.32.46/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542-static struct platform_suspend_ops omap_pm_ops = {
543+static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547diff -urNp linux-2.6.32.46/arch/arm/mach-pnx4008/pm.c linux-2.6.32.46/arch/arm/mach-pnx4008/pm.c
548--- linux-2.6.32.46/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549+++ linux-2.6.32.46/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554-static struct platform_suspend_ops pnx4008_pm_ops = {
555+static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559diff -urNp linux-2.6.32.46/arch/arm/mach-pxa/pm.c linux-2.6.32.46/arch/arm/mach-pxa/pm.c
560--- linux-2.6.32.46/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561+++ linux-2.6.32.46/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566-static struct platform_suspend_ops pxa_pm_ops = {
567+static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571diff -urNp linux-2.6.32.46/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.46/arch/arm/mach-pxa/sharpsl_pm.c
572--- linux-2.6.32.46/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573+++ linux-2.6.32.46/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578-static struct platform_suspend_ops sharpsl_pm_ops = {
579+static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583diff -urNp linux-2.6.32.46/arch/arm/mach-sa1100/pm.c linux-2.6.32.46/arch/arm/mach-sa1100/pm.c
584--- linux-2.6.32.46/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585+++ linux-2.6.32.46/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590-static struct platform_suspend_ops sa11x0_pm_ops = {
591+static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595diff -urNp linux-2.6.32.46/arch/arm/mm/fault.c linux-2.6.32.46/arch/arm/mm/fault.c
596--- linux-2.6.32.46/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597+++ linux-2.6.32.46/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602+#ifdef CONFIG_PAX_PAGEEXEC
603+ if (fsr & FSR_LNX_PF) {
604+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605+ do_group_exit(SIGKILL);
606+ }
607+#endif
608+
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616+#ifdef CONFIG_PAX_PAGEEXEC
617+void pax_report_insns(void *pc, void *sp)
618+{
619+ long i;
620+
621+ printk(KERN_ERR "PAX: bytes at PC: ");
622+ for (i = 0; i < 20; i++) {
623+ unsigned char c;
624+ if (get_user(c, (__force unsigned char __user *)pc+i))
625+ printk(KERN_CONT "?? ");
626+ else
627+ printk(KERN_CONT "%02x ", c);
628+ }
629+ printk("\n");
630+
631+ printk(KERN_ERR "PAX: bytes at SP-4: ");
632+ for (i = -1; i < 20; i++) {
633+ unsigned long c;
634+ if (get_user(c, (__force unsigned long __user *)sp+i))
635+ printk(KERN_CONT "???????? ");
636+ else
637+ printk(KERN_CONT "%08lx ", c);
638+ }
639+ printk("\n");
640+}
641+#endif
642+
643 /*
644 * First Level Translation Fault Handler
645 *
646diff -urNp linux-2.6.32.46/arch/arm/mm/mmap.c linux-2.6.32.46/arch/arm/mm/mmap.c
647--- linux-2.6.32.46/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648+++ linux-2.6.32.46/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653+#ifdef CONFIG_PAX_RANDMMAP
654+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655+#endif
656+
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664- if (TASK_SIZE - len >= addr &&
665- (!vma || addr + len <= vma->vm_start))
666+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670- start_addr = addr = mm->free_area_cache;
671+ start_addr = addr = mm->free_area_cache;
672 } else {
673- start_addr = addr = TASK_UNMAPPED_BASE;
674- mm->cached_hole_size = 0;
675+ start_addr = addr = mm->mmap_base;
676+ mm->cached_hole_size = 0;
677 }
678
679 full_search:
680@@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684- if (start_addr != TASK_UNMAPPED_BASE) {
685- start_addr = addr = TASK_UNMAPPED_BASE;
686+ if (start_addr != mm->mmap_base) {
687+ start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693- if (!vma || addr + len <= vma->vm_start) {
694+ if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
698diff -urNp linux-2.6.32.46/arch/arm/plat-s3c/pm.c linux-2.6.32.46/arch/arm/plat-s3c/pm.c
699--- linux-2.6.32.46/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.46/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705-static struct platform_suspend_ops s3c_pm_ops = {
706+static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710diff -urNp linux-2.6.32.46/arch/avr32/include/asm/elf.h linux-2.6.32.46/arch/avr32/include/asm/elf.h
711--- linux-2.6.32.46/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712+++ linux-2.6.32.46/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722+
723+#define PAX_DELTA_MMAP_LEN 15
724+#define PAX_DELTA_STACK_LEN 15
725+#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729diff -urNp linux-2.6.32.46/arch/avr32/include/asm/kmap_types.h linux-2.6.32.46/arch/avr32/include/asm/kmap_types.h
730--- linux-2.6.32.46/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731+++ linux-2.6.32.46/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736-D(14) KM_TYPE_NR
737+D(14) KM_CLEARPAGE,
738+D(15) KM_TYPE_NR
739 };
740
741 #undef D
742diff -urNp linux-2.6.32.46/arch/avr32/mach-at32ap/pm.c linux-2.6.32.46/arch/avr32/mach-at32ap/pm.c
743--- linux-2.6.32.46/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744+++ linux-2.6.32.46/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745@@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749-static struct platform_suspend_ops avr32_pm_ops = {
750+static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754diff -urNp linux-2.6.32.46/arch/avr32/mm/fault.c linux-2.6.32.46/arch/avr32/mm/fault.c
755--- linux-2.6.32.46/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756+++ linux-2.6.32.46/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761+#ifdef CONFIG_PAX_PAGEEXEC
762+void pax_report_insns(void *pc, void *sp)
763+{
764+ unsigned long i;
765+
766+ printk(KERN_ERR "PAX: bytes at PC: ");
767+ for (i = 0; i < 20; i++) {
768+ unsigned char c;
769+ if (get_user(c, (unsigned char *)pc+i))
770+ printk(KERN_CONT "???????? ");
771+ else
772+ printk(KERN_CONT "%02x ", c);
773+ }
774+ printk("\n");
775+}
776+#endif
777+
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781@@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785+
786+#ifdef CONFIG_PAX_PAGEEXEC
787+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790+ do_group_exit(SIGKILL);
791+ }
792+ }
793+#endif
794+
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798diff -urNp linux-2.6.32.46/arch/blackfin/kernel/kgdb.c linux-2.6.32.46/arch/blackfin/kernel/kgdb.c
799--- linux-2.6.32.46/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800+++ linux-2.6.32.46/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805-struct kgdb_arch arch_kgdb_ops = {
806+const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810diff -urNp linux-2.6.32.46/arch/blackfin/mach-common/pm.c linux-2.6.32.46/arch/blackfin/mach-common/pm.c
811--- linux-2.6.32.46/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812+++ linux-2.6.32.46/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817-struct platform_suspend_ops bfin_pm_ops = {
818+const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822diff -urNp linux-2.6.32.46/arch/frv/include/asm/kmap_types.h linux-2.6.32.46/arch/frv/include/asm/kmap_types.h
823--- linux-2.6.32.46/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824+++ linux-2.6.32.46/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825@@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829+ KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833diff -urNp linux-2.6.32.46/arch/frv/mm/elf-fdpic.c linux-2.6.32.46/arch/frv/mm/elf-fdpic.c
834--- linux-2.6.32.46/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835+++ linux-2.6.32.46/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840- if (TASK_SIZE - len >= addr &&
841- (!vma || addr + len <= vma->vm_start))
842+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850- if (addr + len <= vma->vm_start)
851+ if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859- if (addr + len <= vma->vm_start)
860+ if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864diff -urNp linux-2.6.32.46/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.46/arch/ia64/hp/common/hwsw_iommu.c
865--- linux-2.6.32.46/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866+++ linux-2.6.32.46/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867@@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885diff -urNp linux-2.6.32.46/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.46/arch/ia64/hp/common/sba_iommu.c
886--- linux-2.6.32.46/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.46/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892-extern struct dma_map_ops swiotlb_dma_ops;
893+extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901-struct dma_map_ops sba_dma_ops = {
902+const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906diff -urNp linux-2.6.32.46/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.46/arch/ia64/ia32/binfmt_elf32.c
907--- linux-2.6.32.46/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908+++ linux-2.6.32.46/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913+#ifdef CONFIG_PAX_ASLR
914+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915+
916+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918+#endif
919+
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923diff -urNp linux-2.6.32.46/arch/ia64/ia32/ia32priv.h linux-2.6.32.46/arch/ia64/ia32/ia32priv.h
924--- linux-2.6.32.46/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925+++ linux-2.6.32.46/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930-#define IA32_STACK_TOP IA32_PAGE_OFFSET
931+#ifdef CONFIG_PAX_RANDUSTACK
932+#define __IA32_DELTA_STACK (current->mm->delta_stack)
933+#else
934+#define __IA32_DELTA_STACK 0UL
935+#endif
936+
937+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938+
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942diff -urNp linux-2.6.32.46/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.46/arch/ia64/include/asm/dma-mapping.h
943--- linux-2.6.32.46/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944+++ linux-2.6.32.46/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945@@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949-extern struct dma_map_ops *dma_ops;
950+extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958- struct dma_map_ops *ops = platform_dma_get_ops(dev);
959+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967- struct dma_map_ops *ops = platform_dma_get_ops(dev);
968+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976- struct dma_map_ops *ops = platform_dma_get_ops(dev);
977+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983- struct dma_map_ops *ops = platform_dma_get_ops(dev);
984+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988diff -urNp linux-2.6.32.46/arch/ia64/include/asm/elf.h linux-2.6.32.46/arch/ia64/include/asm/elf.h
989--- linux-2.6.32.46/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990+++ linux-2.6.32.46/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991@@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995+#ifdef CONFIG_PAX_ASLR
996+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997+
998+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000+#endif
1001+
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005diff -urNp linux-2.6.32.46/arch/ia64/include/asm/machvec.h linux-2.6.32.46/arch/ia64/include/asm/machvec.h
1006--- linux-2.6.32.46/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007+++ linux-2.6.32.46/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021-extern struct dma_map_ops *dma_get_ops(struct device *);
1022+extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026diff -urNp linux-2.6.32.46/arch/ia64/include/asm/pgtable.h linux-2.6.32.46/arch/ia64/include/asm/pgtable.h
1027--- linux-2.6.32.46/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028+++ linux-2.6.32.46/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029@@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033-
1034+#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038@@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042+
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047+#else
1048+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050+# define PAGE_COPY_NOEXEC PAGE_COPY
1051+#endif
1052+
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056diff -urNp linux-2.6.32.46/arch/ia64/include/asm/spinlock.h linux-2.6.32.46/arch/ia64/include/asm/spinlock.h
1057--- linux-2.6.32.46/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058+++ linux-2.6.32.46/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068diff -urNp linux-2.6.32.46/arch/ia64/include/asm/uaccess.h linux-2.6.32.46/arch/ia64/include/asm/uaccess.h
1069--- linux-2.6.32.46/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070+++ linux-2.6.32.46/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089diff -urNp linux-2.6.32.46/arch/ia64/kernel/dma-mapping.c linux-2.6.32.46/arch/ia64/kernel/dma-mapping.c
1090--- linux-2.6.32.46/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091+++ linux-2.6.32.46/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092@@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096-struct dma_map_ops *dma_ops;
1097+const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101@@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105-struct dma_map_ops *dma_get_ops(struct device *dev)
1106+const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110diff -urNp linux-2.6.32.46/arch/ia64/kernel/module.c linux-2.6.32.46/arch/ia64/kernel/module.c
1111--- linux-2.6.32.46/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112+++ linux-2.6.32.46/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117- if (mod && mod->arch.init_unw_table &&
1118- module_region == mod->module_init) {
1119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127+in_init_rx (const struct module *mod, uint64_t addr)
1128+{
1129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130+}
1131+
1132+static inline int
1133+in_init_rw (const struct module *mod, uint64_t addr)
1134+{
1135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136+}
1137+
1138+static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141- return addr - (uint64_t) mod->module_init < mod->init_size;
1142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143+}
1144+
1145+static inline int
1146+in_core_rx (const struct module *mod, uint64_t addr)
1147+{
1148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149+}
1150+
1151+static inline int
1152+in_core_rw (const struct module *mod, uint64_t addr)
1153+{
1154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160- return addr - (uint64_t) mod->module_core < mod->core_size;
1161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170+ if (in_init_rx(mod, val))
1171+ val -= (uint64_t) mod->module_init_rx;
1172+ else if (in_init_rw(mod, val))
1173+ val -= (uint64_t) mod->module_init_rw;
1174+ else if (in_core_rx(mod, val))
1175+ val -= (uint64_t) mod->module_core_rx;
1176+ else if (in_core_rw(mod, val))
1177+ val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185- if (mod->core_size > MAX_LTOFF)
1186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191- gp = mod->core_size - MAX_LTOFF / 2;
1192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194- gp = mod->core_size / 2;
1195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201diff -urNp linux-2.6.32.46/arch/ia64/kernel/pci-dma.c linux-2.6.32.46/arch/ia64/kernel/pci-dma.c
1202--- linux-2.6.32.46/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203+++ linux-2.6.32.46/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204@@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208-extern struct dma_map_ops intel_dma_ops;
1209+extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224+
1225+static const struct dma_map_ops intel_iommu_dma_ops = {
1226+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227+ .alloc_coherent = intel_alloc_coherent,
1228+ .free_coherent = intel_free_coherent,
1229+ .map_sg = intel_map_sg,
1230+ .unmap_sg = intel_unmap_sg,
1231+ .map_page = intel_map_page,
1232+ .unmap_page = intel_unmap_page,
1233+ .mapping_error = intel_mapping_error,
1234+
1235+ .sync_single_for_cpu = machvec_dma_sync_single,
1236+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1237+ .sync_single_for_device = machvec_dma_sync_single,
1238+ .sync_sg_for_device = machvec_dma_sync_sg,
1239+ .dma_supported = iommu_dma_supported,
1240+};
1241+
1242 void __init pci_iommu_alloc(void)
1243 {
1244- dma_ops = &intel_dma_ops;
1245-
1246- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250- dma_ops->dma_supported = iommu_dma_supported;
1251+ dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
1255diff -urNp linux-2.6.32.46/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.46/arch/ia64/kernel/pci-swiotlb.c
1256--- linux-2.6.32.46/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257+++ linux-2.6.32.46/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262-struct dma_map_ops swiotlb_dma_ops = {
1263+const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267diff -urNp linux-2.6.32.46/arch/ia64/kernel/sys_ia64.c linux-2.6.32.46/arch/ia64/kernel/sys_ia64.c
1268--- linux-2.6.32.46/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269+++ linux-2.6.32.46/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274+
1275+#ifdef CONFIG_PAX_RANDMMAP
1276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1277+ addr = mm->free_area_cache;
1278+ else
1279+#endif
1280+
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288- if (start_addr != TASK_UNMAPPED_BASE) {
1289+ if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291- addr = TASK_UNMAPPED_BASE;
1292+ addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297- if (!vma || addr + len <= vma->vm_start) {
1298+ if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302diff -urNp linux-2.6.32.46/arch/ia64/kernel/topology.c linux-2.6.32.46/arch/ia64/kernel/topology.c
1303--- linux-2.6.32.46/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304+++ linux-2.6.32.46/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309-static struct sysfs_ops cache_sysfs_ops = {
1310+static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314diff -urNp linux-2.6.32.46/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.46/arch/ia64/kernel/vmlinux.lds.S
1315--- linux-2.6.32.46/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316+++ linux-2.6.32.46/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317@@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321- __phys_per_cpu_start = __per_cpu_load;
1322+ __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326diff -urNp linux-2.6.32.46/arch/ia64/mm/fault.c linux-2.6.32.46/arch/ia64/mm/fault.c
1327--- linux-2.6.32.46/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328+++ linux-2.6.32.46/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333+#ifdef CONFIG_PAX_PAGEEXEC
1334+void pax_report_insns(void *pc, void *sp)
1335+{
1336+ unsigned long i;
1337+
1338+ printk(KERN_ERR "PAX: bytes at PC: ");
1339+ for (i = 0; i < 8; i++) {
1340+ unsigned int c;
1341+ if (get_user(c, (unsigned int *)pc+i))
1342+ printk(KERN_CONT "???????? ");
1343+ else
1344+ printk(KERN_CONT "%08x ", c);
1345+ }
1346+ printk("\n");
1347+}
1348+#endif
1349+
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357- if ((vma->vm_flags & mask) != mask)
1358+ if ((vma->vm_flags & mask) != mask) {
1359+
1360+#ifdef CONFIG_PAX_PAGEEXEC
1361+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363+ goto bad_area;
1364+
1365+ up_read(&mm->mmap_sem);
1366+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367+ do_group_exit(SIGKILL);
1368+ }
1369+#endif
1370+
1371 goto bad_area;
1372
1373+ }
1374+
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378diff -urNp linux-2.6.32.46/arch/ia64/mm/hugetlbpage.c linux-2.6.32.46/arch/ia64/mm/hugetlbpage.c
1379--- linux-2.6.32.46/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.46/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385- if (!vmm || (addr + len) <= vmm->vm_start)
1386+ if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390diff -urNp linux-2.6.32.46/arch/ia64/mm/init.c linux-2.6.32.46/arch/ia64/mm/init.c
1391--- linux-2.6.32.46/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392+++ linux-2.6.32.46/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397+
1398+#ifdef CONFIG_PAX_PAGEEXEC
1399+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400+ vma->vm_flags &= ~VM_EXEC;
1401+
1402+#ifdef CONFIG_PAX_MPROTECT
1403+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404+ vma->vm_flags &= ~VM_MAYEXEC;
1405+#endif
1406+
1407+ }
1408+#endif
1409+
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413diff -urNp linux-2.6.32.46/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.46/arch/ia64/sn/pci/pci_dma.c
1414--- linux-2.6.32.46/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415+++ linux-2.6.32.46/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420-static struct dma_map_ops sn_dma_ops = {
1421+static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425diff -urNp linux-2.6.32.46/arch/m32r/lib/usercopy.c linux-2.6.32.46/arch/m32r/lib/usercopy.c
1426--- linux-2.6.32.46/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427+++ linux-2.6.32.46/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428@@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432+ if ((long)n < 0)
1433+ return n;
1434+
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442+ if ((long)n < 0)
1443+ return n;
1444+
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
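The two guards added above reject a byte count whose sign bit is set before it ever reaches __copy_user()/__copy_user_zeroing(), where an effectively negative length would be interpreted as a huge unsigned value. A minimal userspace sketch of the same idea follows; do_copy() and checked_copy() are illustrative names, not part of the patch.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a raw copy routine that trusts its length. */
static unsigned long do_copy(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);	/* would run far past the buffers for a huge n */
	return 0;		/* 0 bytes left uncopied */
}

/* Mirrors the guarded entry points: bail out when the sign bit is set. */
static unsigned long checked_copy(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)
		return n;	/* whole request reported as uncopied */
	return do_copy(to, from, n);
}

int main(void)
{
	char src[16] = "example", dst[16];
	long bad_len = -1;	/* e.g. an error code misused as a length */

	/* (unsigned long)-1 is ULONG_MAX; the guard rejects it before memcpy. */
	printf("left uncopied: %lu\n", checked_copy(dst, src, (unsigned long)bad_len));
	printf("left uncopied: %lu\n", checked_copy(dst, src, sizeof(dst)));
	return 0;
}

Passing the bad length through the unguarded path would ask memcpy() for ULONG_MAX bytes; the guard instead reports the whole request as uncopied, matching the patch's convention of returning n.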
1448diff -urNp linux-2.6.32.46/arch/mips/alchemy/devboards/pm.c linux-2.6.32.46/arch/mips/alchemy/devboards/pm.c
1449--- linux-2.6.32.46/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450+++ linux-2.6.32.46/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455-static struct platform_suspend_ops db1x_pm_ops = {
1456+static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460diff -urNp linux-2.6.32.46/arch/mips/include/asm/elf.h linux-2.6.32.46/arch/mips/include/asm/elf.h
1461--- linux-2.6.32.46/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462+++ linux-2.6.32.46/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467+#ifdef CONFIG_PAX_ASLR
1468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469+
1470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472+#endif
1473+
1474 #endif /* _ASM_ELF_H */
1475diff -urNp linux-2.6.32.46/arch/mips/include/asm/page.h linux-2.6.32.46/arch/mips/include/asm/page.h
1476--- linux-2.6.32.46/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477+++ linux-2.6.32.46/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487diff -urNp linux-2.6.32.46/arch/mips/include/asm/reboot.h linux-2.6.32.46/arch/mips/include/asm/reboot.h
1488--- linux-2.6.32.46/arch/mips/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
1489+++ linux-2.6.32.46/arch/mips/include/asm/reboot.h 2011-08-21 17:35:02.000000000 -0400
1490@@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494-extern void (*_machine_restart)(char *command);
1495-extern void (*_machine_halt)(void);
1496+extern void (*__noreturn _machine_restart)(char *command);
1497+extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500diff -urNp linux-2.6.32.46/arch/mips/include/asm/system.h linux-2.6.32.46/arch/mips/include/asm/system.h
1501--- linux-2.6.32.46/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.46/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1503@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507-extern unsigned long arch_align_stack(unsigned long sp);
1508+#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
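With the randomizing helper gone (its body is removed from arch/mips/kernel/process.c below), arch_align_stack() reduces to clearing the low four bits, i.e. rounding the stack pointer down to a 16-byte boundary. A standalone sketch of the masking with an arbitrary example value:

#include <stdio.h>

/* Same operation as the new arch_align_stack() macro: clear the low four
 * bits, rounding the value down to a 16-byte boundary. */
#define ALIGN_STACK(x) ((x) & ~0xfUL)

int main(void)
{
	unsigned long sp = 0x7fffabcdUL;

	printf("%#lx -> %#lx\n", sp, ALIGN_STACK(sp));		/* ...abc0 */
	printf("%#lx -> %#lx\n", sp + 0x9, ALIGN_STACK(sp + 0x9));
	return 0;
}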
1511diff -urNp linux-2.6.32.46/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.46/arch/mips/kernel/binfmt_elfn32.c
1512--- linux-2.6.32.46/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1513+++ linux-2.6.32.46/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1514@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518+#ifdef CONFIG_PAX_ASLR
1519+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520+
1521+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#endif
1524+
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528diff -urNp linux-2.6.32.46/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.46/arch/mips/kernel/binfmt_elfo32.c
1529--- linux-2.6.32.46/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1530+++ linux-2.6.32.46/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1531@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535+#ifdef CONFIG_PAX_ASLR
1536+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537+
1538+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540+#endif
1541+
1542 #include <asm/processor.h>
1543
1544 /*
1545diff -urNp linux-2.6.32.46/arch/mips/kernel/kgdb.c linux-2.6.32.46/arch/mips/kernel/kgdb.c
1546--- linux-2.6.32.46/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1547+++ linux-2.6.32.46/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1548@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552+/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556diff -urNp linux-2.6.32.46/arch/mips/kernel/process.c linux-2.6.32.46/arch/mips/kernel/process.c
1557--- linux-2.6.32.46/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1558+++ linux-2.6.32.46/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1559@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563-
1564-/*
1565- * Don't forget that the stack pointer must be aligned on a 8 bytes
1566- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567- */
1568-unsigned long arch_align_stack(unsigned long sp)
1569-{
1570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571- sp -= get_random_int() & ~PAGE_MASK;
1572-
1573- return sp & ALMASK;
1574-}
1575diff -urNp linux-2.6.32.46/arch/mips/kernel/reset.c linux-2.6.32.46/arch/mips/kernel/reset.c
1576--- linux-2.6.32.46/arch/mips/kernel/reset.c 2011-03-27 14:31:47.000000000 -0400
1577+++ linux-2.6.32.46/arch/mips/kernel/reset.c 2011-08-21 17:35:26.000000000 -0400
1578@@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582-void (*_machine_restart)(char *command);
1583-void (*_machine_halt)(void);
1584+void (*__noreturn _machine_restart)(char *command);
1585+void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589@@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593+ BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600+ BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607+ BUG();
1608 }
1609diff -urNp linux-2.6.32.46/arch/mips/kernel/syscall.c linux-2.6.32.46/arch/mips/kernel/syscall.c
1610--- linux-2.6.32.46/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1611+++ linux-2.6.32.46/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1612@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616+
1617+#ifdef CONFIG_PAX_RANDMMAP
1618+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619+#endif
1620+
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627- if (task_size - len >= addr &&
1628- (!vmm || addr + len <= vmm->vm_start))
1629+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632- addr = TASK_UNMAPPED_BASE;
1633+ addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641- if (!vmm || addr + len <= vmm->vm_start)
1642+ if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
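Both hunks above swap the open-coded "!vmm || addr + len <= vmm->vm_start" test for check_heap_stack_gap(); this section only uses the helper, which is introduced elsewhere in the patch. Conceptually it keeps the original check but also refuses candidate ranges that would end flush against a downward-growing stack VMA, leaving a guard gap instead. The sketch below shows that rough shape only: the struct fields mirror the kernel's, VM_GROWSDOWN's value is reproduced just so the snippet compiles, and GUARD_GAP is an assumed placeholder rather than the patch's actual policy.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved. */
struct vma {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};
#define VM_GROWSDOWN	0x00000100UL
#define GUARD_GAP	(64UL << 10)	/* assumed gap size, demo only */

/* Conceptual shape of the check: the original test, plus an extra margin
 * when the next mapping grows downward (a stack). */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;			/* no mapping above the range */
	if (next->vm_flags & VM_GROWSDOWN)
		return addr + len + GUARD_GAP <= next->vm_start;
	return addr + len <= next->vm_start;	/* the old open-coded test */
}

int main(void)
{
	struct vma stack = { 0x40000000UL, 0x40010000UL, VM_GROWSDOWN };

	printf("%d\n", gap_ok(&stack, 0x30000000UL, 0x1000));	/* 1: far below */
	printf("%d\n", gap_ok(&stack, 0x3ffff000UL, 0x1000));	/* 0: flush against it */
	printf("%d\n", gap_ok(NULL, 0x30000000UL, 0x1000));	/* 1: nothing above */
	return 0;
}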
1646diff -urNp linux-2.6.32.46/arch/mips/Makefile linux-2.6.32.46/arch/mips/Makefile
1647--- linux-2.6.32.46/arch/mips/Makefile 2011-03-27 14:31:47.000000000 -0400
1648+++ linux-2.6.32.46/arch/mips/Makefile 2011-08-21 19:26:52.000000000 -0400
1649@@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653+cflags-y += -Wno-sign-compare -Wno-extra
1654+
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658diff -urNp linux-2.6.32.46/arch/mips/mm/fault.c linux-2.6.32.46/arch/mips/mm/fault.c
1659--- linux-2.6.32.46/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1660+++ linux-2.6.32.46/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1661@@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665+#ifdef CONFIG_PAX_PAGEEXEC
1666+void pax_report_insns(void *pc, void *sp)
1667+{
1668+ unsigned long i;
1669+
1670+ printk(KERN_ERR "PAX: bytes at PC: ");
1671+ for (i = 0; i < 5; i++) {
1672+ unsigned int c;
1673+ if (get_user(c, (unsigned int *)pc+i))
1674+ printk(KERN_CONT "???????? ");
1675+ else
1676+ printk(KERN_CONT "%08x ", c);
1677+ }
1678+ printk("\n");
1679+}
1680+#endif
1681+
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685diff -urNp linux-2.6.32.46/arch/parisc/include/asm/elf.h linux-2.6.32.46/arch/parisc/include/asm/elf.h
1686--- linux-2.6.32.46/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1687+++ linux-2.6.32.46/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1688@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692+#ifdef CONFIG_PAX_ASLR
1693+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694+
1695+#define PAX_DELTA_MMAP_LEN 16
1696+#define PAX_DELTA_STACK_LEN 16
1697+#endif
1698+
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702diff -urNp linux-2.6.32.46/arch/parisc/include/asm/pgtable.h linux-2.6.32.46/arch/parisc/include/asm/pgtable.h
1703--- linux-2.6.32.46/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1704+++ linux-2.6.32.46/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1705@@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709+
1710+#ifdef CONFIG_PAX_PAGEEXEC
1711+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714+#else
1715+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716+# define PAGE_COPY_NOEXEC PAGE_COPY
1717+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718+#endif
1719+
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723diff -urNp linux-2.6.32.46/arch/parisc/kernel/module.c linux-2.6.32.46/arch/parisc/kernel/module.c
1724--- linux-2.6.32.46/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1725+++ linux-2.6.32.46/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1726@@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730+static inline int in_init_rx(struct module *me, void *loc)
1731+{
1732+ return (loc >= me->module_init_rx &&
1733+ loc < (me->module_init_rx + me->init_size_rx));
1734+}
1735+
1736+static inline int in_init_rw(struct module *me, void *loc)
1737+{
1738+ return (loc >= me->module_init_rw &&
1739+ loc < (me->module_init_rw + me->init_size_rw));
1740+}
1741+
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744- return (loc >= me->module_init &&
1745- loc <= (me->module_init + me->init_size));
1746+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1747+}
1748+
1749+static inline int in_core_rx(struct module *me, void *loc)
1750+{
1751+ return (loc >= me->module_core_rx &&
1752+ loc < (me->module_core_rx + me->core_size_rx));
1753+}
1754+
1755+static inline int in_core_rw(struct module *me, void *loc)
1756+{
1757+ return (loc >= me->module_core_rw &&
1758+ loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763- return (loc >= me->module_core &&
1764- loc <= (me->module_core + me->core_size));
1765+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773- me->core_size = ALIGN(me->core_size, 16);
1774- me->arch.got_offset = me->core_size;
1775- me->core_size += gots * sizeof(struct got_entry);
1776-
1777- me->core_size = ALIGN(me->core_size, 16);
1778- me->arch.fdesc_offset = me->core_size;
1779- me->core_size += fdescs * sizeof(Elf_Fdesc);
1780+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781+ me->arch.got_offset = me->core_size_rw;
1782+ me->core_size_rw += gots * sizeof(struct got_entry);
1783+
1784+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785+ me->arch.fdesc_offset = me->core_size_rw;
1786+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794- got = me->module_core + me->arch.got_offset;
1795+ got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826diff -urNp linux-2.6.32.46/arch/parisc/kernel/sys_parisc.c linux-2.6.32.46/arch/parisc/kernel/sys_parisc.c
1827--- linux-2.6.32.46/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1828+++ linux-2.6.32.46/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1829@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833- if (!vma || addr + len <= vma->vm_start)
1834+ if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842- if (!vma || addr + len <= vma->vm_start)
1843+ if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851- addr = TASK_UNMAPPED_BASE;
1852+ addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856diff -urNp linux-2.6.32.46/arch/parisc/kernel/traps.c linux-2.6.32.46/arch/parisc/kernel/traps.c
1857--- linux-2.6.32.46/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1858+++ linux-2.6.32.46/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1859@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863- if (vma && (regs->iaoq[0] >= vma->vm_start)
1864- && (vma->vm_flags & VM_EXEC)) {
1865-
1866+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870diff -urNp linux-2.6.32.46/arch/parisc/mm/fault.c linux-2.6.32.46/arch/parisc/mm/fault.c
1871--- linux-2.6.32.46/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1872+++ linux-2.6.32.46/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1873@@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877+#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885- if (code == 6 || code == 16)
1886+ if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894+#ifdef CONFIG_PAX_PAGEEXEC
1895+/*
1896+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897+ *
1898+ * returns 1 when task should be killed
1899+ * 2 when rt_sigreturn trampoline was detected
1900+ * 3 when unpatched PLT trampoline was detected
1901+ */
1902+static int pax_handle_fetch_fault(struct pt_regs *regs)
1903+{
1904+
1905+#ifdef CONFIG_PAX_EMUPLT
1906+ int err;
1907+
1908+ do { /* PaX: unpatched PLT emulation */
1909+ unsigned int bl, depwi;
1910+
1911+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913+
1914+ if (err)
1915+ break;
1916+
1917+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919+
1920+ err = get_user(ldw, (unsigned int *)addr);
1921+ err |= get_user(bv, (unsigned int *)(addr+4));
1922+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1923+
1924+ if (err)
1925+ break;
1926+
1927+ if (ldw == 0x0E801096U &&
1928+ bv == 0xEAC0C000U &&
1929+ ldw2 == 0x0E881095U)
1930+ {
1931+ unsigned int resolver, map;
1932+
1933+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935+ if (err)
1936+ break;
1937+
1938+ regs->gr[20] = instruction_pointer(regs)+8;
1939+ regs->gr[21] = map;
1940+ regs->gr[22] = resolver;
1941+ regs->iaoq[0] = resolver | 3UL;
1942+ regs->iaoq[1] = regs->iaoq[0] + 4;
1943+ return 3;
1944+ }
1945+ }
1946+ } while (0);
1947+#endif
1948+
1949+#ifdef CONFIG_PAX_EMUTRAMP
1950+
1951+#ifndef CONFIG_PAX_EMUSIGRT
1952+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953+ return 1;
1954+#endif
1955+
1956+ do { /* PaX: rt_sigreturn emulation */
1957+ unsigned int ldi1, ldi2, bel, nop;
1958+
1959+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963+
1964+ if (err)
1965+ break;
1966+
1967+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968+ ldi2 == 0x3414015AU &&
1969+ bel == 0xE4008200U &&
1970+ nop == 0x08000240U)
1971+ {
1972+ regs->gr[25] = (ldi1 & 2) >> 1;
1973+ regs->gr[20] = __NR_rt_sigreturn;
1974+ regs->gr[31] = regs->iaoq[1] + 16;
1975+ regs->sr[0] = regs->iasq[1];
1976+ regs->iaoq[0] = 0x100UL;
1977+ regs->iaoq[1] = regs->iaoq[0] + 4;
1978+ regs->iasq[0] = regs->sr[2];
1979+ regs->iasq[1] = regs->sr[2];
1980+ return 2;
1981+ }
1982+ } while (0);
1983+#endif
1984+
1985+ return 1;
1986+}
1987+
1988+void pax_report_insns(void *pc, void *sp)
1989+{
1990+ unsigned long i;
1991+
1992+ printk(KERN_ERR "PAX: bytes at PC: ");
1993+ for (i = 0; i < 5; i++) {
1994+ unsigned int c;
1995+ if (get_user(c, (unsigned int *)pc+i))
1996+ printk(KERN_CONT "???????? ");
1997+ else
1998+ printk(KERN_CONT "%08x ", c);
1999+ }
2000+ printk("\n");
2001+}
2002+#endif
2003+
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007@@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011- if ((vma->vm_flags & acc_type) != acc_type)
2012+ if ((vma->vm_flags & acc_type) != acc_type) {
2013+
2014+#ifdef CONFIG_PAX_PAGEEXEC
2015+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016+ (address & ~3UL) == instruction_pointer(regs))
2017+ {
2018+ up_read(&mm->mmap_sem);
2019+ switch (pax_handle_fetch_fault(regs)) {
2020+
2021+#ifdef CONFIG_PAX_EMUPLT
2022+ case 3:
2023+ return;
2024+#endif
2025+
2026+#ifdef CONFIG_PAX_EMUTRAMP
2027+ case 2:
2028+ return;
2029+#endif
2030+
2031+ }
2032+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033+ do_group_exit(SIGKILL);
2034+ }
2035+#endif
2036+
2037 goto bad_area;
2038+ }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/device.h linux-2.6.32.46/arch/powerpc/include/asm/device.h
2043--- linux-2.6.32.46/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2044+++ linux-2.6.32.46/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2045@@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049- struct dma_map_ops *dma_ops;
2050+ const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.46/arch/powerpc/include/asm/dma-mapping.h
2055--- linux-2.6.32.46/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2056+++ linux-2.6.32.46/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2057@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061-extern struct dma_map_ops dma_direct_ops;
2062+extern const struct dma_map_ops dma_direct_ops;
2063
2064-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
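The const-qualification applied throughout this hunk (and to the other *_ops tables in this section) lets such structures be placed in a read-only section, so their function pointers cannot be rewritten at runtime. A tiny standalone illustration of the C-level effect; demo_map_ops and demo_supported are made-up names for the demo, not kernel symbols.

#include <stdio.h>

/* A miniature ops table in the style of dma_map_ops. */
struct demo_map_ops {
	int (*dma_supported)(unsigned long mask);
};

static int demo_supported(unsigned long mask)
{
	return mask == ~0UL;
}

/* Declared const, the table can live in a read-only section and a stray
 * write (or an attempt to retarget the callbacks) is rejected. */
static const struct demo_map_ops demo_ops = {
	.dma_supported = demo_supported,
};

int main(void)
{
	/* demo_ops.dma_supported = NULL;  -- would fail to compile */
	printf("%d\n", demo_ops.dma_supported(~0UL));
	return 0;
}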
2123diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/elf.h linux-2.6.32.46/arch/powerpc/include/asm/elf.h
2124--- linux-2.6.32.46/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2125+++ linux-2.6.32.46/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2126@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130-extern unsigned long randomize_et_dyn(unsigned long base);
2131-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132+#define ELF_ET_DYN_BASE (0x20000000)
2133+
2134+#ifdef CONFIG_PAX_ASLR
2135+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136+
2137+#ifdef __powerpc64__
2138+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140+#else
2141+#define PAX_DELTA_MMAP_LEN 15
2142+#define PAX_DELTA_STACK_LEN 15
2143+#endif
2144+#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153-#define arch_randomize_brk arch_randomize_brk
2154-
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/iommu.h linux-2.6.32.46/arch/powerpc/include/asm/iommu.h
2159--- linux-2.6.32.46/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2160+++ linux-2.6.32.46/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2161@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165+/* dma-iommu.c */
2166+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167+
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.46/arch/powerpc/include/asm/kmap_types.h
2172--- linux-2.6.32.46/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2173+++ linux-2.6.32.46/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2174@@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178+ KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/page_64.h linux-2.6.32.46/arch/powerpc/include/asm/page_64.h
2183--- linux-2.6.32.46/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2184+++ linux-2.6.32.46/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2185@@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191+#define VM_STACK_DEFAULT_FLAGS32 \
2192+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198+#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202+#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/page.h linux-2.6.32.46/arch/powerpc/include/asm/page.h
2207--- linux-2.6.32.46/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2208+++ linux-2.6.32.46/arch/powerpc/include/asm/page.h 2011-08-21 16:07:39.000000000 -0400
2209@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215+#define VM_DATA_DEFAULT_FLAGS32 \
2216+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225+#define ktla_ktva(addr) (addr)
2226+#define ktva_ktla(addr) (addr)
2227+
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
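The page.h change above and the earlier page_64.h change apply the same pattern: VM_EXEC is included in the default data/stack flags only when the process personality carries READ_IMPLIES_EXEC, rather than unconditionally. A small standalone illustration of the conditional flag construction; the constants are copied in only so the snippet compiles on its own.

#include <stdio.h>

#define VM_READ			0x00000001UL
#define VM_WRITE		0x00000002UL
#define VM_EXEC			0x00000004UL
#define READ_IMPLIES_EXEC	0x0400000

/* Same pattern as the reworked VM_DATA_DEFAULT_FLAGS32: VM_EXEC is granted
 * only when the personality asks for read-implies-exec. */
static unsigned long data_default_flags(unsigned int personality)
{
	return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
	       VM_READ | VM_WRITE;
}

int main(void)
{
	printf("plain:             %#lx\n", data_default_flags(0));
	printf("read-implies-exec: %#lx\n", data_default_flags(READ_IMPLIES_EXEC));
	return 0;
}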
2231diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/pci.h linux-2.6.32.46/arch/powerpc/include/asm/pci.h
2232--- linux-2.6.32.46/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2233+++ linux-2.6.32.46/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2234@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239-extern struct dma_map_ops *get_pci_dma_ops(void);
2240+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241+extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/pgtable.h linux-2.6.32.46/arch/powerpc/include/asm/pgtable.h
2246--- linux-2.6.32.46/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2247+++ linux-2.6.32.46/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2248@@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252+#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.46/arch/powerpc/include/asm/pte-hash32.h
2257--- linux-2.6.32.46/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2258+++ linux-2.6.32.46/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2259@@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263+#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/ptrace.h linux-2.6.32.46/arch/powerpc/include/asm/ptrace.h
2268--- linux-2.6.32.46/arch/powerpc/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
2269+++ linux-2.6.32.46/arch/powerpc/include/asm/ptrace.h 2011-08-21 15:53:58.000000000 -0400
2270@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/reg.h linux-2.6.32.46/arch/powerpc/include/asm/reg.h
2280--- linux-2.6.32.46/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2281+++ linux-2.6.32.46/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2282@@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.46/arch/powerpc/include/asm/swiotlb.h
2291--- linux-2.6.32.46/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2292+++ linux-2.6.32.46/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2293@@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297-extern struct dma_map_ops swiotlb_dma_ops;
2298+extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/system.h linux-2.6.32.46/arch/powerpc/include/asm/system.h
2303--- linux-2.6.32.46/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2304+++ linux-2.6.32.46/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2305@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309-extern unsigned long arch_align_stack(unsigned long sp);
2310+#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314diff -urNp linux-2.6.32.46/arch/powerpc/include/asm/uaccess.h linux-2.6.32.46/arch/powerpc/include/asm/uaccess.h
2315--- linux-2.6.32.46/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2316+++ linux-2.6.32.46/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2317@@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322+
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326@@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330-#ifndef __powerpc64__
2331-
2332-static inline unsigned long copy_from_user(void *to,
2333- const void __user *from, unsigned long n)
2334-{
2335- unsigned long over;
2336-
2337- if (access_ok(VERIFY_READ, from, n))
2338- return __copy_tofrom_user((__force void __user *)to, from, n);
2339- if ((unsigned long)from < TASK_SIZE) {
2340- over = (unsigned long)from + n - TASK_SIZE;
2341- return __copy_tofrom_user((__force void __user *)to, from,
2342- n - over) + over;
2343- }
2344- return n;
2345-}
2346-
2347-static inline unsigned long copy_to_user(void __user *to,
2348- const void *from, unsigned long n)
2349-{
2350- unsigned long over;
2351-
2352- if (access_ok(VERIFY_WRITE, to, n))
2353- return __copy_tofrom_user(to, (__force void __user *)from, n);
2354- if ((unsigned long)to < TASK_SIZE) {
2355- over = (unsigned long)to + n - TASK_SIZE;
2356- return __copy_tofrom_user(to, (__force void __user *)from,
2357- n - over) + over;
2358- }
2359- return n;
2360-}
2361-
2362-#else /* __powerpc64__ */
2363-
2364-#define __copy_in_user(to, from, size) \
2365- __copy_tofrom_user((to), (from), (size))
2366-
2367-extern unsigned long copy_from_user(void *to, const void __user *from,
2368- unsigned long n);
2369-extern unsigned long copy_to_user(void __user *to, const void *from,
2370- unsigned long n);
2371-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372- unsigned long n);
2373-
2374-#endif /* __powerpc64__ */
2375-
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383+
2384+ if (!__builtin_constant_p(n))
2385+ check_object_size(to, n, false);
2386+
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394+
2395+ if (!__builtin_constant_p(n))
2396+ check_object_size(from, n, true);
2397+
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405+#ifndef __powerpc64__
2406+
2407+static inline unsigned long __must_check copy_from_user(void *to,
2408+ const void __user *from, unsigned long n)
2409+{
2410+ unsigned long over;
2411+
2412+ if ((long)n < 0)
2413+ return n;
2414+
2415+ if (access_ok(VERIFY_READ, from, n)) {
2416+ if (!__builtin_constant_p(n))
2417+ check_object_size(to, n, false);
2418+ return __copy_tofrom_user((__force void __user *)to, from, n);
2419+ }
2420+ if ((unsigned long)from < TASK_SIZE) {
2421+ over = (unsigned long)from + n - TASK_SIZE;
2422+ if (!__builtin_constant_p(n - over))
2423+ check_object_size(to, n - over, false);
2424+ return __copy_tofrom_user((__force void __user *)to, from,
2425+ n - over) + over;
2426+ }
2427+ return n;
2428+}
2429+
2430+static inline unsigned long __must_check copy_to_user(void __user *to,
2431+ const void *from, unsigned long n)
2432+{
2433+ unsigned long over;
2434+
2435+ if ((long)n < 0)
2436+ return n;
2437+
2438+ if (access_ok(VERIFY_WRITE, to, n)) {
2439+ if (!__builtin_constant_p(n))
2440+ check_object_size(from, n, true);
2441+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2442+ }
2443+ if ((unsigned long)to < TASK_SIZE) {
2444+ over = (unsigned long)to + n - TASK_SIZE;
2445+ if (!__builtin_constant_p(n))
2446+ check_object_size(from, n - over, true);
2447+ return __copy_tofrom_user(to, (__force void __user *)from,
2448+ n - over) + over;
2449+ }
2450+ return n;
2451+}
2452+
2453+#else /* __powerpc64__ */
2454+
2455+#define __copy_in_user(to, from, size) \
2456+ __copy_tofrom_user((to), (from), (size))
2457+
2458+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459+{
2460+ if ((long)n < 0 || n > INT_MAX)
2461+ return n;
2462+
2463+ if (!__builtin_constant_p(n))
2464+ check_object_size(to, n, false);
2465+
2466+ if (likely(access_ok(VERIFY_READ, from, n)))
2467+ n = __copy_from_user(to, from, n);
2468+ else
2469+ memset(to, 0, n);
2470+ return n;
2471+}
2472+
2473+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474+{
2475+ if ((long)n < 0 || n > INT_MAX)
2476+ return n;
2477+
2478+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479+ if (!__builtin_constant_p(n))
2480+ check_object_size(from, n, true);
2481+ n = __copy_to_user(to, from, n);
2482+ }
2483+ return n;
2484+}
2485+
2486+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487+ unsigned long n);
2488+
2489+#endif /* __powerpc64__ */
2490+
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
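The rewritten copy routines above pass every non-constant length to check_object_size(), declared at the top of this hunk; its implementation belongs to the patch's PAX_USERCOPY support and lies outside this section. The point is to verify that the requested length fits within the kernel object being copied from or to before the user copy runs. Below is a loose userspace analogue of that bounds comparison: here the caller supplies the destination size explicitly, whereas the kernel helper derives the object's bounds itself (slab or stack object), so copy_checked() is only an illustration of the comparison, not the helper's interface.

#include <stdio.h>
#include <string.h>

/* Rough analogue of a destination-bounds check before a copy: the caller
 * states how big the destination object is, and an oversized request is
 * refused instead of overrunning it. */
static int copy_checked(void *to, size_t to_size, const void *from, size_t n)
{
	if (n > to_size)
		return -1;	/* would overflow the destination object */
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char big[64], small[8];
	const char src[64] = "payload";

	printf("fits:    %d\n", copy_checked(big, sizeof(big), src, 32));
	printf("refused: %d\n", copy_checked(small, sizeof(small), src, 32));
	return 0;
}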
2494diff -urNp linux-2.6.32.46/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.46/arch/powerpc/kernel/cacheinfo.c
2495--- linux-2.6.32.46/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2496+++ linux-2.6.32.46/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2497@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501-static struct sysfs_ops cache_index_ops = {
2502+static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506diff -urNp linux-2.6.32.46/arch/powerpc/kernel/dma.c linux-2.6.32.46/arch/powerpc/kernel/dma.c
2507--- linux-2.6.32.46/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2508+++ linux-2.6.32.46/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2509@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513-struct dma_map_ops dma_direct_ops = {
2514+const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518diff -urNp linux-2.6.32.46/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.46/arch/powerpc/kernel/dma-iommu.c
2519--- linux-2.6.32.46/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2520+++ linux-2.6.32.46/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2521@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530diff -urNp linux-2.6.32.46/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.46/arch/powerpc/kernel/dma-swiotlb.c
2531--- linux-2.6.32.46/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2532+++ linux-2.6.32.46/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2533@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537-struct dma_map_ops swiotlb_dma_ops = {
2538+const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542diff -urNp linux-2.6.32.46/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.46/arch/powerpc/kernel/exceptions-64e.S
2543--- linux-2.6.32.46/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2544+++ linux-2.6.32.46/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2545@@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549+ bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553@@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557-1: bl .save_nvgprs
2558- mr r5,r3
2559+1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563diff -urNp linux-2.6.32.46/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.46/arch/powerpc/kernel/exceptions-64s.S
2564--- linux-2.6.32.46/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2565+++ linux-2.6.32.46/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2566@@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570+ bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574- bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578diff -urNp linux-2.6.32.46/arch/powerpc/kernel/ibmebus.c linux-2.6.32.46/arch/powerpc/kernel/ibmebus.c
2579--- linux-2.6.32.46/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2580+++ linux-2.6.32.46/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2581@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585-static struct dma_map_ops ibmebus_dma_ops = {
2586+static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590diff -urNp linux-2.6.32.46/arch/powerpc/kernel/kgdb.c linux-2.6.32.46/arch/powerpc/kernel/kgdb.c
2591--- linux-2.6.32.46/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2592+++ linux-2.6.32.46/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2593@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606-struct kgdb_arch arch_kgdb_ops = {
2607+const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611diff -urNp linux-2.6.32.46/arch/powerpc/kernel/module_32.c linux-2.6.32.46/arch/powerpc/kernel/module_32.c
2612--- linux-2.6.32.46/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2613+++ linux-2.6.32.46/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2614@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618- printk("Module doesn't contain .plt or .init.plt sections.\n");
2619+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627- if (location >= mod->module_core
2628- && location < mod->module_core + mod->core_size)
2629+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632- else
2633+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636+ else {
2637+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638+ return ~0UL;
2639+ }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643diff -urNp linux-2.6.32.46/arch/powerpc/kernel/module.c linux-2.6.32.46/arch/powerpc/kernel/module.c
2644--- linux-2.6.32.46/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2645+++ linux-2.6.32.46/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2646@@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650+#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656+ return vmalloc(size);
2657+}
2658+
2659+void *module_alloc_exec(unsigned long size)
2660+#else
2661+void *module_alloc(unsigned long size)
2662+#endif
2663+
2664+{
2665+ if (size == 0)
2666+ return NULL;
2667+
2668 return vmalloc_exec(size);
2669 }
2670
2671@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675+#ifdef CONFIG_PAX_KERNEXEC
2676+void module_free_exec(struct module *mod, void *module_region)
2677+{
2678+ module_free(mod, module_region);
2679+}
2680+#endif
2681+
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685diff -urNp linux-2.6.32.46/arch/powerpc/kernel/pci-common.c linux-2.6.32.46/arch/powerpc/kernel/pci-common.c
2686--- linux-2.6.32.46/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2687+++ linux-2.6.32.46/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2688@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701-struct dma_map_ops *get_pci_dma_ops(void)
2702+const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706diff -urNp linux-2.6.32.46/arch/powerpc/kernel/process.c linux-2.6.32.46/arch/powerpc/kernel/process.c
2707--- linux-2.6.32.46/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2708+++ linux-2.6.32.46/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2709@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728- printk(" (%pS)",
2729+ printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746-
2747-unsigned long arch_align_stack(unsigned long sp)
2748-{
2749- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750- sp -= get_random_int() & ~PAGE_MASK;
2751- return sp & ~0xf;
2752-}
2753-
2754-static inline unsigned long brk_rnd(void)
2755-{
2756- unsigned long rnd = 0;
2757-
2758- /* 8MB for 32bit, 1GB for 64bit */
2759- if (is_32bit_task())
2760- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761- else
2762- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763-
2764- return rnd << PAGE_SHIFT;
2765-}
2766-
2767-unsigned long arch_randomize_brk(struct mm_struct *mm)
2768-{
2769- unsigned long base = mm->brk;
2770- unsigned long ret;
2771-
2772-#ifdef CONFIG_PPC_STD_MMU_64
2773- /*
2774- * If we are using 1TB segments and we are allowed to randomise
2775- * the heap, we can put it above 1TB so it is backed by a 1TB
2776- * segment. Otherwise the heap will be in the bottom 1TB
2777- * which always uses 256MB segments and this may result in a
2778- * performance penalty.
2779- */
2780- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782-#endif
2783-
2784- ret = PAGE_ALIGN(base + brk_rnd());
2785-
2786- if (ret < mm->brk)
2787- return mm->brk;
2788-
2789- return ret;
2790-}
2791-
2792-unsigned long randomize_et_dyn(unsigned long base)
2793-{
2794- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795-
2796- if (ret < base)
2797- return base;
2798-
2799- return ret;
2800-}
2801diff -urNp linux-2.6.32.46/arch/powerpc/kernel/ptrace.c linux-2.6.32.46/arch/powerpc/kernel/ptrace.c
2802--- linux-2.6.32.46/arch/powerpc/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
2803+++ linux-2.6.32.46/arch/powerpc/kernel/ptrace.c 2011-08-21 15:53:39.000000000 -0400
2804@@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817- tmp = ptrace_get_reg(child, (int) index);
2818+ tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
2822diff -urNp linux-2.6.32.46/arch/powerpc/kernel/signal_32.c linux-2.6.32.46/arch/powerpc/kernel/signal_32.c
2823--- linux-2.6.32.46/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.46/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2825@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834diff -urNp linux-2.6.32.46/arch/powerpc/kernel/signal_64.c linux-2.6.32.46/arch/powerpc/kernel/signal_64.c
2835--- linux-2.6.32.46/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2836+++ linux-2.6.32.46/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2837@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846diff -urNp linux-2.6.32.46/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.46/arch/powerpc/kernel/sys_ppc32.c
2847--- linux-2.6.32.46/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2848+++ linux-2.6.32.46/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2849@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
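The hunk above folds the second copy_to_user() into the existing error check, so a failure to write back tmp.__unused now results in -EFAULT instead of being silently ignored. A small stand-alone sketch of the pattern, where fake_copy_to_user() mimics the kernel helper by returning the number of bytes it failed to copy:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static unsigned long fake_copy_to_user(void *dst, const void *src,
                                        unsigned long n, int fail)
{
	if (fail)
		return n;		/* nothing copied */
	memcpy(dst, src, n);
	return 0;			/* all bytes copied */
}

/* Both copies sit in one condition, so either failure sets -EFAULT. */
static long sysctl_tail_demo(int fail_second)
{
	char kbuf[8] = "result", ubuf[8], unused[4] = "pad";
	char uunused[4];
	long error = 0;

	if (fake_copy_to_user(ubuf, kbuf, sizeof(kbuf), 0) ||
	    fake_copy_to_user(uunused, unused, sizeof(unused), fail_second))
		error = -EFAULT;
	return error;
}

int main(void)
{
	printf("both copies succeed: %ld\n", sysctl_tail_demo(0));
	printf("second copy fails:   %ld\n", sysctl_tail_demo(1));
	return 0;
}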
2862diff -urNp linux-2.6.32.46/arch/powerpc/kernel/traps.c linux-2.6.32.46/arch/powerpc/kernel/traps.c
2863--- linux-2.6.32.46/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2864+++ linux-2.6.32.46/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2865@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869+extern void gr_handle_kernel_exploit(void);
2870+
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878+ gr_handle_kernel_exploit();
2879+
2880 oops_exit();
2881 do_exit(err);
2882
2883diff -urNp linux-2.6.32.46/arch/powerpc/kernel/vdso.c linux-2.6.32.46/arch/powerpc/kernel/vdso.c
2884--- linux-2.6.32.46/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2885+++ linux-2.6.32.46/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2886@@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890+#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898- current->mm->context.vdso_base = 0;
2899+ current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907- 0, 0);
2908+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
2912diff -urNp linux-2.6.32.46/arch/powerpc/kernel/vio.c linux-2.6.32.46/arch/powerpc/kernel/vio.c
2913--- linux-2.6.32.46/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2914+++ linux-2.6.32.46/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2915@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919-struct dma_map_ops vio_dma_mapping_ops = {
2920+static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925+ .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
2937diff -urNp linux-2.6.32.46/arch/powerpc/lib/usercopy_64.c linux-2.6.32.46/arch/powerpc/lib/usercopy_64.c
2938--- linux-2.6.32.46/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2939+++ linux-2.6.32.46/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2940@@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945-{
2946- if (likely(access_ok(VERIFY_READ, from, n)))
2947- n = __copy_from_user(to, from, n);
2948- else
2949- memset(to, 0, n);
2950- return n;
2951-}
2952-
2953-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954-{
2955- if (likely(access_ok(VERIFY_WRITE, to, n)))
2956- n = __copy_to_user(to, from, n);
2957- return n;
2958-}
2959-
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967-EXPORT_SYMBOL(copy_from_user);
2968-EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
2971diff -urNp linux-2.6.32.46/arch/powerpc/Makefile linux-2.6.32.46/arch/powerpc/Makefile
2972--- linux-2.6.32.46/arch/powerpc/Makefile 2011-03-27 14:31:47.000000000 -0400
2973+++ linux-2.6.32.46/arch/powerpc/Makefile 2011-08-21 19:27:08.000000000 -0400
2974@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978+cflags-y += -Wno-sign-compare -Wno-extra
2979+
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983diff -urNp linux-2.6.32.46/arch/powerpc/mm/fault.c linux-2.6.32.46/arch/powerpc/mm/fault.c
2984--- linux-2.6.32.46/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2985+++ linux-2.6.32.46/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2986@@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990+#include <linux/slab.h>
2991+#include <linux/pagemap.h>
2992+#include <linux/compiler.h>
2993+#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997@@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001+#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009+#ifdef CONFIG_PAX_PAGEEXEC
3010+/*
3011+ * PaX: decide what to do with offenders (regs->nip = fault address)
3012+ *
3013+ * returns 1 when task should be killed
3014+ */
3015+static int pax_handle_fetch_fault(struct pt_regs *regs)
3016+{
3017+ return 1;
3018+}
3019+
3020+void pax_report_insns(void *pc, void *sp)
3021+{
3022+ unsigned long i;
3023+
3024+ printk(KERN_ERR "PAX: bytes at PC: ");
3025+ for (i = 0; i < 5; i++) {
3026+ unsigned int c;
3027+ if (get_user(c, (unsigned int __user *)pc+i))
3028+ printk(KERN_CONT "???????? ");
3029+ else
3030+ printk(KERN_CONT "%08x ", c);
3031+ }
3032+ printk("\n");
3033+}
3034+#endif
3035+
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043- error_code &= 0x48200000;
3044+ error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048@@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052- if (error_code & 0x10000000)
3053+ if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057@@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061- if (error_code & DSISR_PROTFAULT)
3062+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066@@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070+
3071+#ifdef CONFIG_PAX_PAGEEXEC
3072+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073+#ifdef CONFIG_PPC_STD_MMU
3074+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075+#else
3076+ if (is_exec && regs->nip == address) {
3077+#endif
3078+ switch (pax_handle_fetch_fault(regs)) {
3079+ }
3080+
3081+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082+ do_group_exit(SIGKILL);
3083+ }
3084+ }
3085+#endif
3086+
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
3090diff -urNp linux-2.6.32.46/arch/powerpc/mm/mem.c linux-2.6.32.46/arch/powerpc/mm/mem.c
3091--- linux-2.6.32.46/arch/powerpc/mm/mem.c 2011-03-27 14:31:47.000000000 -0400
3092+++ linux-2.6.32.46/arch/powerpc/mm/mem.c 2011-08-21 15:50:39.000000000 -0400
3093@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097- int i;
3098+ unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102diff -urNp linux-2.6.32.46/arch/powerpc/mm/mmap_64.c linux-2.6.32.46/arch/powerpc/mm/mmap_64.c
3103--- linux-2.6.32.46/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3104+++ linux-2.6.32.46/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3105@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109+
3110+#ifdef CONFIG_PAX_RANDMMAP
3111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3112+ mm->mmap_base += mm->delta_mmap;
3113+#endif
3114+
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119+
3120+#ifdef CONFIG_PAX_RANDMMAP
3121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123+#endif
3124+
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
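With PAX_RANDMMAP the chosen mmap base is simply displaced by per-mm random deltas: upward from TASK_UNMAPPED_BASE in the legacy bottom-up layout, downward from the stack-relative base in the top-down layout. A sketch of that adjustment with made-up example values (in the real patch the delta fields are filled in at exec time from PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN bits of randomness):

#include <stdio.h>

struct mm_demo {
	unsigned long mmap_base;
	unsigned long delta_mmap;	/* random, page-aligned */
	unsigned long delta_stack;	/* random, page-aligned */
	int legacy_layout;
	int randmmap;
};

static void pick_mmap_base(struct mm_demo *mm, unsigned long unmapped_base,
                           unsigned long topdown_base)
{
	if (mm->legacy_layout) {
		mm->mmap_base = unmapped_base;
		if (mm->randmmap)
			mm->mmap_base += mm->delta_mmap;	/* shift up */
	} else {
		mm->mmap_base = topdown_base;
		if (mm->randmmap)
			mm->mmap_base -= mm->delta_mmap + mm->delta_stack; /* shift down */
	}
}

int main(void)
{
	struct mm_demo mm = { 0, 0x00400000UL, 0x00100000UL, 0, 1 };

	pick_mmap_base(&mm, 0x40000000UL, 0x7fff00000000UL);
	printf("top-down mmap base: 0x%lx\n", mm.mmap_base);
	return 0;
}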
3128diff -urNp linux-2.6.32.46/arch/powerpc/mm/slice.c linux-2.6.32.46/arch/powerpc/mm/slice.c
3129--- linux-2.6.32.46/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3130+++ linux-2.6.32.46/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3131@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135- return (!vma || (addr + len) <= vma->vm_start);
3136+ return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140@@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144- if (!vma || addr + len <= vma->vm_start) {
3145+ if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153- addr = mm->mmap_base;
3154- while (addr > len) {
3155+ if (mm->mmap_base < len)
3156+ addr = -ENOMEM;
3157+ else
3158+ addr = mm->mmap_base - len;
3159+
3160+ while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171- if (!vma || (addr + len) <= vma->vm_start) {
3172+ if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180- addr = vma->vm_start;
3181+ addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189+#ifdef CONFIG_PAX_RANDMMAP
3190+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191+ addr = 0;
3192+#endif
3193+
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
3197diff -urNp linux-2.6.32.46/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.46/arch/powerpc/platforms/52xx/lite5200_pm.c
3198--- linux-2.6.32.46/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3199+++ linux-2.6.32.46/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3200@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204-static struct platform_suspend_ops lite5200_pm_ops = {
3205+static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209diff -urNp linux-2.6.32.46/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.46/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210--- linux-2.6.32.46/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3211+++ linux-2.6.32.46/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3212@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216-static struct platform_suspend_ops mpc52xx_pm_ops = {
3217+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221diff -urNp linux-2.6.32.46/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.46/arch/powerpc/platforms/83xx/suspend.c
3222--- linux-2.6.32.46/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3223+++ linux-2.6.32.46/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3224@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233diff -urNp linux-2.6.32.46/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.46/arch/powerpc/platforms/cell/iommu.c
3234--- linux-2.6.32.46/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3235+++ linux-2.6.32.46/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3236@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240-struct dma_map_ops dma_iommu_fixed_ops = {
3241+const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245diff -urNp linux-2.6.32.46/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.46/arch/powerpc/platforms/ps3/system-bus.c
3246--- linux-2.6.32.46/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3247+++ linux-2.6.32.46/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3248@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252-static struct dma_map_ops ps3_sb_dma_ops = {
3253+static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261-static struct dma_map_ops ps3_ioc0_dma_ops = {
3262+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266diff -urNp linux-2.6.32.46/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.46/arch/powerpc/platforms/pseries/Kconfig
3267--- linux-2.6.32.46/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3268+++ linux-2.6.32.46/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3269@@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273+ select PCI_MSI
3274+ select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278diff -urNp linux-2.6.32.46/arch/s390/include/asm/elf.h linux-2.6.32.46/arch/s390/include/asm/elf.h
3279--- linux-2.6.32.46/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3280+++ linux-2.6.32.46/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3281@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287+
3288+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295diff -urNp linux-2.6.32.46/arch/s390/include/asm/setup.h linux-2.6.32.46/arch/s390/include/asm/setup.h
3296--- linux-2.6.32.46/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3297+++ linux-2.6.32.46/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3298@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302-extern unsigned int switch_amode;
3303+#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309-extern unsigned int s390_noexec;
3310+#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314diff -urNp linux-2.6.32.46/arch/s390/include/asm/uaccess.h linux-2.6.32.46/arch/s390/include/asm/uaccess.h
3315--- linux-2.6.32.46/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3316+++ linux-2.6.32.46/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3317@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321+
3322+ if ((long)n < 0)
3323+ return n;
3324+
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332+ if ((long)n < 0)
3333+ return n;
3334+
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342+
3343+ if ((long)n < 0)
3344+ return n;
3345+
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
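The added (long)n < 0 checks reject lengths whose top bit is set, i.e. sizes that became "negative" through a signed/unsigned mix-up or a subtraction underflow, before they ever reach access_ok() and the low-level copy routines. A tiny illustration of why such a length looks harmless as an unsigned value but is caught by the signed test:

#include <stdio.h>

/* Returns the number of bytes NOT copied, mimicking copy_from_user():
 * a sign-bit-set length is refused outright. */
static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse: report everything as uncopied */
	/* ... access_ok() and __copy_from_user() would run here ... */
	return 0;
}

int main(void)
{
	unsigned long len = 16;
	unsigned long bad = len - 32;	/* underflows to a huge unsigned value */

	printf("normal length:      %lu bytes left\n", guarded_copy(len));
	printf("underflowed length: %lu bytes left\n", guarded_copy(bad));
	return 0;
}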
3349diff -urNp linux-2.6.32.46/arch/s390/Kconfig linux-2.6.32.46/arch/s390/Kconfig
3350--- linux-2.6.32.46/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3351+++ linux-2.6.32.46/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3352@@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356+ default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359- space. The kernel parameter switch_amode=on will enable this feature,
3360- default is disabled. Enabling this (via kernel parameter) on machines
3361- earlier than IBM System z9-109 EC/BC will reduce system performance.
3362+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363+ will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366- protection option below. Enabling the execute protection via the
3367- noexec kernel parameter will also switch the addressing modes,
3368- independent of the switch_amode kernel parameter.
3369+ protection option below. Enabling the execute protection will also
3370+ switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375+ default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380- The kernel parameter noexec=on will enable this feature and also
3381- switch the addressing modes, default is disabled. Enabling this (via
3382- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383- will reduce system performance.
3384+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385+ reduce system performance.
3386
3387 comment "Code generation options"
3388
3389diff -urNp linux-2.6.32.46/arch/s390/kernel/module.c linux-2.6.32.46/arch/s390/kernel/module.c
3390--- linux-2.6.32.46/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3391+++ linux-2.6.32.46/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3392@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396- me->core_size = ALIGN(me->core_size, 4);
3397- me->arch.got_offset = me->core_size;
3398- me->core_size += me->arch.got_size;
3399- me->arch.plt_offset = me->core_size;
3400- me->core_size += me->arch.plt_size;
3401+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402+ me->arch.got_offset = me->core_size_rw;
3403+ me->core_size_rw += me->arch.got_size;
3404+ me->arch.plt_offset = me->core_size_rx;
3405+ me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413- gotent = me->module_core + me->arch.got_offset +
3414+ gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422- (val + (Elf_Addr) me->module_core - loc) >> 1;
3423+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431- ip = me->module_core + me->arch.plt_offset +
3432+ ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440- val = (Elf_Addr) me->module_core +
3441+ val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449- ((Elf_Addr) me->module_core + me->arch.got_offset);
3450+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
3463diff -urNp linux-2.6.32.46/arch/s390/kernel/setup.c linux-2.6.32.46/arch/s390/kernel/setup.c
3464--- linux-2.6.32.46/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3465+++ linux-2.6.32.46/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3466@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470-unsigned int switch_amode = 0;
3471-EXPORT_SYMBOL_GPL(switch_amode);
3472-
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480-
3481-/*
3482- * Switch kernel/user addressing modes?
3483- */
3484-static int __init early_parse_switch_amode(char *p)
3485-{
3486- switch_amode = 1;
3487- return 0;
3488-}
3489-early_param("switch_amode", early_parse_switch_amode);
3490-
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498-#ifdef CONFIG_S390_EXEC_PROTECT
3499-unsigned int s390_noexec = 0;
3500-EXPORT_SYMBOL_GPL(s390_noexec);
3501-
3502-/*
3503- * Enable execute protection?
3504- */
3505-static int __init early_parse_noexec(char *p)
3506-{
3507- if (!strncmp(p, "off", 3))
3508- return 0;
3509- switch_amode = 1;
3510- s390_noexec = 1;
3511- return 0;
3512-}
3513-early_param("noexec", early_parse_noexec);
3514-#endif /* CONFIG_S390_EXEC_PROTECT */
3515-
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519diff -urNp linux-2.6.32.46/arch/s390/mm/mmap.c linux-2.6.32.46/arch/s390/mm/mmap.c
3520--- linux-2.6.32.46/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3521+++ linux-2.6.32.46/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3522@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526+
3527+#ifdef CONFIG_PAX_RANDMMAP
3528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3529+ mm->mmap_base += mm->delta_mmap;
3530+#endif
3531+
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536+
3537+#ifdef CONFIG_PAX_RANDMMAP
3538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3539+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540+#endif
3541+
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549+
3550+#ifdef CONFIG_PAX_RANDMMAP
3551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3552+ mm->mmap_base += mm->delta_mmap;
3553+#endif
3554+
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559+
3560+#ifdef CONFIG_PAX_RANDMMAP
3561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3562+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563+#endif
3564+
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568diff -urNp linux-2.6.32.46/arch/score/include/asm/system.h linux-2.6.32.46/arch/score/include/asm/system.h
3569--- linux-2.6.32.46/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3570+++ linux-2.6.32.46/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3571@@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575-extern unsigned long arch_align_stack(unsigned long sp);
3576+#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580diff -urNp linux-2.6.32.46/arch/score/kernel/process.c linux-2.6.32.46/arch/score/kernel/process.c
3581--- linux-2.6.32.46/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3582+++ linux-2.6.32.46/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3583@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587-
3588-unsigned long arch_align_stack(unsigned long sp)
3589-{
3590- return sp;
3591-}
3592diff -urNp linux-2.6.32.46/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.46/arch/sh/boards/mach-hp6xx/pm.c
3593--- linux-2.6.32.46/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3594+++ linux-2.6.32.46/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3595@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599-static struct platform_suspend_ops hp6x0_pm_ops = {
3600+static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604diff -urNp linux-2.6.32.46/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.46/arch/sh/kernel/cpu/sh4/sq.c
3605--- linux-2.6.32.46/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3606+++ linux-2.6.32.46/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3607@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611-static struct sysfs_ops sq_sysfs_ops = {
3612+static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616diff -urNp linux-2.6.32.46/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.46/arch/sh/kernel/cpu/shmobile/pm.c
3617--- linux-2.6.32.46/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3618+++ linux-2.6.32.46/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3619@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623-static struct platform_suspend_ops sh_pm_ops = {
3624+static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628diff -urNp linux-2.6.32.46/arch/sh/kernel/kgdb.c linux-2.6.32.46/arch/sh/kernel/kgdb.c
3629--- linux-2.6.32.46/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3630+++ linux-2.6.32.46/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3631@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635-struct kgdb_arch arch_kgdb_ops = {
3636+const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640diff -urNp linux-2.6.32.46/arch/sh/mm/mmap.c linux-2.6.32.46/arch/sh/mm/mmap.c
3641--- linux-2.6.32.46/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3642+++ linux-2.6.32.46/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3643@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647- if (TASK_SIZE - len >= addr &&
3648- (!vma || addr + len <= vma->vm_start))
3649+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653@@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657- if (likely(!vma || addr + len <= vma->vm_start)) {
3658+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666- if (TASK_SIZE - len >= addr &&
3667- (!vma || addr + len <= vma->vm_start))
3668+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676- if (!vma || addr <= vma->vm_start) {
3677+ if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685- addr = mm->mmap_base-len;
3686- if (do_colour_align)
3687- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688+ addr = mm->mmap_base - len;
3689
3690 do {
3691+ if (do_colour_align)
3692+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699- if (likely(!vma || addr+len <= vma->vm_start)) {
3700+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708- addr = vma->vm_start-len;
3709- if (do_colour_align)
3710- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711- } while (likely(len < vma->vm_start));
3712+ addr = skip_heap_stack_gap(vma, len);
3713+ } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
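check_heap_stack_gap() and skip_heap_stack_gap() are defined elsewhere in the patch; conceptually they replace the bare "does the candidate range end before the next mapping" test with one that also keeps a guard gap below stack-style mappings, so a new mapping cannot be placed flush against the stack. A simplified, purely illustrative version of that idea (the real helpers and the gap size differ):

#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP	0x10000UL	/* arbitrary 64 KiB gap for the demo */
#define VM_GROWSDOWN	0x1UL

struct vma_demo {
	unsigned long vm_start;
	unsigned long vm_flags;
};

/* Plain check: the range must end at or before the next mapping.
 * Hardened check: if that mapping grows down (a stack), also keep
 * GUARD_GAP bytes free below it. */
static bool gap_ok(const struct vma_demo *next, unsigned long addr,
                   unsigned long len)
{
	unsigned long limit;

	if (!next)
		return true;
	limit = next->vm_start;
	if (next->vm_flags & VM_GROWSDOWN)
		limit -= GUARD_GAP;
	return addr + len <= limit;
}

int main(void)
{
	struct vma_demo stack = { 0x200000UL, VM_GROWSDOWN };

	/* ends below vm_start but inside the guard gap: rejected (prints 0) */
	printf("%d\n", gap_ok(&stack, 0x1f8000UL, 0x4000UL));
	/* well below the gap: accepted (prints 1) */
	printf("%d\n", gap_ok(&stack, 0x100000UL, 0x4000UL));
	return 0;
}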
3717diff -urNp linux-2.6.32.46/arch/sparc/include/asm/atomic_64.h linux-2.6.32.46/arch/sparc/include/asm/atomic_64.h
3718--- linux-2.6.32.46/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3719+++ linux-2.6.32.46/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3720@@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725+{
3726+ return v->counter;
3727+}
3728 #define atomic64_read(v) ((v)->counter)
3729+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730+{
3731+ return v->counter;
3732+}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736+{
3737+ v->counter = i;
3738+}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741+{
3742+ v->counter = i;
3743+}
3744
3745 extern void atomic_add(int, atomic_t *);
3746+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766+{
3767+ return atomic_add_ret_unchecked(1, v);
3768+}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771+{
3772+ return atomic64_add_ret_unchecked(1, v);
3773+}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780+{
3781+ return atomic_add_ret_unchecked(i, v);
3782+}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785+{
3786+ return atomic64_add_ret_unchecked(i, v);
3787+}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796+{
3797+ return atomic_inc_return_unchecked(v) == 0;
3798+}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807+{
3808+ atomic_add_unchecked(1, v);
3809+}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812+{
3813+ atomic64_add_unchecked(1, v);
3814+}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818+{
3819+ atomic_sub_unchecked(1, v);
3820+}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823+{
3824+ atomic64_sub_unchecked(1, v);
3825+}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832+{
3833+ return cmpxchg(&v->counter, old, new);
3834+}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837+{
3838+ return xchg(&v->counter, new);
3839+}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843- int c, old;
3844+ int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847- if (unlikely(c == (u)))
3848+ if (unlikely(c == u))
3849 break;
3850- old = atomic_cmpxchg((v), c, c + (a));
3851+
3852+ asm volatile("addcc %2, %0, %0\n"
3853+
3854+#ifdef CONFIG_PAX_REFCOUNT
3855+ "tvs %%icc, 6\n"
3856+#endif
3857+
3858+ : "=r" (new)
3859+ : "0" (c), "ir" (a)
3860+ : "cc");
3861+
3862+ old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867- return c != (u);
3868+ return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877+{
3878+ return xchg(&v->counter, new);
3879+}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883- long c, old;
3884+ long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887- if (unlikely(c == (u)))
3888+ if (unlikely(c == u))
3889 break;
3890- old = atomic64_cmpxchg((v), c, c + (a));
3891+
3892+ asm volatile("addcc %2, %0, %0\n"
3893+
3894+#ifdef CONFIG_PAX_REFCOUNT
3895+ "tvs %%xcc, 6\n"
3896+#endif
3897+
3898+ : "=r" (new)
3899+ : "0" (c), "ir" (a)
3900+ : "cc");
3901+
3902+ old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907- return c != (u);
3908+ return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
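The addcc/tvs sequences added above ("tvs %icc, 6", or %xcc for the 64-bit variants) make the atomic arithmetic trap on signed overflow when PAX_REFCOUNT is enabled, so a reference counter cannot silently wrap. A portable sketch of the same overflow-checked addition, using the GCC/Clang __builtin_add_overflow intrinsic in place of a hardware trap:

#include <limits.h>
#include <stdio.h>

/* Add b to *v but refuse to let the result wrap past INT_MAX/INT_MIN;
 * the kernel variant instead raises a trap that the PaX code handles. */
static int checked_add(int *v, int b)
{
	int sum;

	if (__builtin_add_overflow(*v, b, &sum))
		return -1;		/* overflow detected */
	*v = sum;
	return 0;
}

int main(void)
{
	int counter = INT_MAX - 1;

	printf("first add:  %d\n", checked_add(&counter, 1));	/* ok */
	printf("second add: %d\n", checked_add(&counter, 1));	/* overflow */
	printf("counter:    %d\n", counter);	/* unchanged by the failed add */
	return 0;
}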
3912diff -urNp linux-2.6.32.46/arch/sparc/include/asm/cache.h linux-2.6.32.46/arch/sparc/include/asm/cache.h
3913--- linux-2.6.32.46/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3914+++ linux-2.6.32.46/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3915@@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919-#define L1_CACHE_BYTES 32
3920+#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924diff -urNp linux-2.6.32.46/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.46/arch/sparc/include/asm/dma-mapping.h
3925--- linux-2.6.32.46/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3926+++ linux-2.6.32.46/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3927@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944- struct dma_map_ops *ops = get_dma_ops(dev);
3945+ const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953- struct dma_map_ops *ops = get_dma_ops(dev);
3954+ const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958diff -urNp linux-2.6.32.46/arch/sparc/include/asm/elf_32.h linux-2.6.32.46/arch/sparc/include/asm/elf_32.h
3959--- linux-2.6.32.46/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3960+++ linux-2.6.32.46/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3961@@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965+#ifdef CONFIG_PAX_ASLR
3966+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967+
3968+#define PAX_DELTA_MMAP_LEN 16
3969+#define PAX_DELTA_STACK_LEN 16
3970+#endif
3971+
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975diff -urNp linux-2.6.32.46/arch/sparc/include/asm/elf_64.h linux-2.6.32.46/arch/sparc/include/asm/elf_64.h
3976--- linux-2.6.32.46/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3977+++ linux-2.6.32.46/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3978@@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982+#ifdef CONFIG_PAX_ASLR
3983+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984+
3985+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987+#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991diff -urNp linux-2.6.32.46/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.46/arch/sparc/include/asm/pgtable_32.h
3992--- linux-2.6.32.46/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3993+++ linux-2.6.32.46/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3994@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998+
3999+#ifdef CONFIG_PAX_PAGEEXEC
4000+BTFIXUPDEF_INT(page_shared_noexec)
4001+BTFIXUPDEF_INT(page_copy_noexec)
4002+BTFIXUPDEF_INT(page_readonly_noexec)
4003+#endif
4004+
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012+#ifdef CONFIG_PAX_PAGEEXEC
4013+extern pgprot_t PAGE_SHARED_NOEXEC;
4014+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016+#else
4017+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018+# define PAGE_COPY_NOEXEC PAGE_COPY
4019+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020+#endif
4021+
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025diff -urNp linux-2.6.32.46/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.46/arch/sparc/include/asm/pgtsrmmu.h
4026--- linux-2.6.32.46/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
4027+++ linux-2.6.32.46/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
4028@@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032+
4033+#ifdef CONFIG_PAX_PAGEEXEC
4034+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037+#endif
4038+
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042diff -urNp linux-2.6.32.46/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.46/arch/sparc/include/asm/spinlock_64.h
4043--- linux-2.6.32.46/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
4044+++ linux-2.6.32.46/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
4045@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049-static void inline arch_read_lock(raw_rwlock_t *lock)
4050+static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057-"4: add %0, 1, %1\n"
4058+"4: addcc %0, 1, %1\n"
4059+
4060+#ifdef CONFIG_PAX_REFCOUNT
4061+" tvs %%icc, 6\n"
4062+#endif
4063+
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071- : "memory");
4072+ : "memory", "cc");
4073 }
4074
4075-static int inline arch_read_trylock(raw_rwlock_t *lock)
4076+static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084-" add %0, 1, %1\n"
4085+" addcc %0, 1, %1\n"
4086+
4087+#ifdef CONFIG_PAX_REFCOUNT
4088+" tvs %%icc, 6\n"
4089+#endif
4090+
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098-static void inline arch_read_unlock(raw_rwlock_t *lock)
4099+static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105-" sub %0, 1, %1\n"
4106+" subcc %0, 1, %1\n"
4107+
4108+#ifdef CONFIG_PAX_REFCOUNT
4109+" tvs %%icc, 6\n"
4110+#endif
4111+
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119-static void inline arch_write_lock(raw_rwlock_t *lock)
4120+static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128-static void inline arch_write_unlock(raw_rwlock_t *lock)
4129+static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137-static int inline arch_write_trylock(raw_rwlock_t *lock)
4138+static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142diff -urNp linux-2.6.32.46/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.46/arch/sparc/include/asm/thread_info_32.h
4143--- linux-2.6.32.46/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4144+++ linux-2.6.32.46/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4145@@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149+
4150+ unsigned long lowest_stack;
4151 };
4152
4153 /*
4154diff -urNp linux-2.6.32.46/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.46/arch/sparc/include/asm/thread_info_64.h
4155--- linux-2.6.32.46/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4156+++ linux-2.6.32.46/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4157@@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161+ unsigned long lowest_stack;
4162+
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166diff -urNp linux-2.6.32.46/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.46/arch/sparc/include/asm/uaccess_32.h
4167--- linux-2.6.32.46/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4168+++ linux-2.6.32.46/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4169@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173- if (n && __access_ok((unsigned long) to, n))
4174+ if ((long)n < 0)
4175+ return n;
4176+
4177+ if (n && __access_ok((unsigned long) to, n)) {
4178+ if (!__builtin_constant_p(n))
4179+ check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181- else
4182+ } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188+ if ((long)n < 0)
4189+ return n;
4190+
4191+ if (!__builtin_constant_p(n))
4192+ check_object_size(from, n, true);
4193+
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199- if (n && __access_ok((unsigned long) from, n))
4200+ if ((long)n < 0)
4201+ return n;
4202+
4203+ if (n && __access_ok((unsigned long) from, n)) {
4204+ if (!__builtin_constant_p(n))
4205+ check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207- else
4208+ } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214+ if ((long)n < 0)
4215+ return n;
4216+
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220diff -urNp linux-2.6.32.46/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.46/arch/sparc/include/asm/uaccess_64.h
4221--- linux-2.6.32.46/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4222+++ linux-2.6.32.46/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4223@@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227+#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235- unsigned long ret = ___copy_from_user(to, from, size);
4236+ unsigned long ret;
4237
4238+ if ((long)size < 0 || size > INT_MAX)
4239+ return size;
4240+
4241+ if (!__builtin_constant_p(size))
4242+ check_object_size(to, size, false);
4243+
4244+ ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252- unsigned long ret = ___copy_to_user(to, from, size);
4253+ unsigned long ret;
4254+
4255+ if ((long)size < 0 || size > INT_MAX)
4256+ return size;
4257+
4258+ if (!__builtin_constant_p(size))
4259+ check_object_size(from, size, true);
4260
4261+ ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
4265diff -urNp linux-2.6.32.46/arch/sparc/include/asm/uaccess.h linux-2.6.32.46/arch/sparc/include/asm/uaccess.h
4266--- linux-2.6.32.46/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4267+++ linux-2.6.32.46/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4268@@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271+
4272+#ifdef __KERNEL__
4273+#ifndef __ASSEMBLY__
4274+#include <linux/types.h>
4275+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276+#endif
4277+#endif
4278+
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
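check_object_size() is declared here and called from the copy helpers above whenever the length is not a compile-time constant; the intent is to verify that a runtime-sized user copy stays within the bounds of the kernel object it reads from or writes to. A rough illustration of the concept with an explicit bound passed in by the caller (the real implementation derives the object's size from slab or stack bookkeeping instead):

#include <stdio.h>
#include <string.h>

struct msg {
	char header[16];
	char payload[48];
};

/* Toy bound check: refuse a copy that would run past the end of the
 * destination object. */
static int copy_within_object(void *obj, size_t obj_size,
                              void *field, size_t copy_len,
                              const void *src)
{
	size_t offset = (char *)field - (char *)obj;

	if (offset > obj_size || copy_len > obj_size - offset)
		return -1;		/* copy would overflow the object */
	memcpy(field, src, copy_len);
	return 0;
}

int main(void)
{
	struct msg m;
	char user_data[64];

	memset(user_data, 'A', sizeof(user_data));
	/* 48 bytes fit exactly in payload: accepted */
	printf("%d\n", copy_within_object(&m, sizeof(m), m.payload, 48, user_data));
	/* 64 bytes into a 48-byte field: rejected */
	printf("%d\n", copy_within_object(&m, sizeof(m), m.payload, 64, user_data));
	return 0;
}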
4282diff -urNp linux-2.6.32.46/arch/sparc/kernel/iommu.c linux-2.6.32.46/arch/sparc/kernel/iommu.c
4283--- linux-2.6.32.46/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4284+++ linux-2.6.32.46/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4285@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289-static struct dma_map_ops sun4u_dma_ops = {
4290+static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4303diff -urNp linux-2.6.32.46/arch/sparc/kernel/ioport.c linux-2.6.32.46/arch/sparc/kernel/ioport.c
4304--- linux-2.6.32.46/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4305+++ linux-2.6.32.46/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4306@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310-struct dma_map_ops sbus_dma_ops = {
4311+const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328-struct dma_map_ops pci32_dma_ops = {
4329+const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
4333diff -urNp linux-2.6.32.46/arch/sparc/kernel/kgdb_32.c linux-2.6.32.46/arch/sparc/kernel/kgdb_32.c
4334--- linux-2.6.32.46/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4335+++ linux-2.6.32.46/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4336@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340-struct kgdb_arch arch_kgdb_ops = {
4341+const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345diff -urNp linux-2.6.32.46/arch/sparc/kernel/kgdb_64.c linux-2.6.32.46/arch/sparc/kernel/kgdb_64.c
4346--- linux-2.6.32.46/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4347+++ linux-2.6.32.46/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4348@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352-struct kgdb_arch arch_kgdb_ops = {
4353+const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
4357diff -urNp linux-2.6.32.46/arch/sparc/kernel/Makefile linux-2.6.32.46/arch/sparc/kernel/Makefile
4358--- linux-2.6.32.46/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4359+++ linux-2.6.32.46/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4360@@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364-ccflags-y := -Werror
4365+#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369diff -urNp linux-2.6.32.46/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.46/arch/sparc/kernel/pci_sun4v.c
4370--- linux-2.6.32.46/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4371+++ linux-2.6.32.46/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4372@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376-static struct dma_map_ops sun4v_dma_ops = {
4377+static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
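
[note] The const added to the dma_map_ops and kgdb_arch tables in the hunks above lets those method tables be placed in read-only data, so their function pointers cannot be rewritten at run time. A toy illustration of the pattern, with made-up toy_ops/toy_map names, compiles as follows:

#include <stdio.h>

/* Toy method table, analogous to dma_map_ops: a const instance lands in .rodata,
 * so exploit code cannot repoint the function pointers. */
struct toy_ops {
	int (*map)(int page);
	void (*unmap)(int handle);
};

static int toy_map(int page)      { return page + 1; }
static void toy_unmap(int handle) { (void)handle; }

static const struct toy_ops toy_dma_ops = {
	.map   = toy_map,
	.unmap = toy_unmap,
};

int main(void)
{
	printf("mapped: %d\n", toy_dma_ops.map(41));
	/* toy_dma_ops.map = NULL;  -- rejected at compile time thanks to const */
	return 0;
}
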
4381diff -urNp linux-2.6.32.46/arch/sparc/kernel/process_32.c linux-2.6.32.46/arch/sparc/kernel/process_32.c
4382--- linux-2.6.32.46/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4383+++ linux-2.6.32.46/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4384@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388- printk("%pS\n", (void *) rw->ins[7]);
4389+ printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397- printk("PC: <%pS>\n", (void *) r->pc);
4398+ printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414- printk("%pS ] ", (void *) pc);
4415+ printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419diff -urNp linux-2.6.32.46/arch/sparc/kernel/process_64.c linux-2.6.32.46/arch/sparc/kernel/process_64.c
4420--- linux-2.6.32.46/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4421+++ linux-2.6.32.46/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4422@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434- printk("TPC: <%pS>\n", (void *) regs->tpc);
4435+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457diff -urNp linux-2.6.32.46/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.46/arch/sparc/kernel/sys_sparc_32.c
4458--- linux-2.6.32.46/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4459+++ linux-2.6.32.46/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4460@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464- addr = TASK_UNMAPPED_BASE;
4465+ addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473- if (!vmm || addr + len <= vmm->vm_start)
4474+ if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
4478diff -urNp linux-2.6.32.46/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.46/arch/sparc/kernel/sys_sparc_64.c
4479--- linux-2.6.32.46/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4480+++ linux-2.6.32.46/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4481@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485- if ((flags & MAP_SHARED) &&
4486+ if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494+#ifdef CONFIG_PAX_RANDMMAP
4495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496+#endif
4497+
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505- if (task_size - len >= addr &&
4506- (!vma || addr + len <= vma->vm_start))
4507+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512- start_addr = addr = mm->free_area_cache;
4513+ start_addr = addr = mm->free_area_cache;
4514 } else {
4515- start_addr = addr = TASK_UNMAPPED_BASE;
4516+ start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520@@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524- if (start_addr != TASK_UNMAPPED_BASE) {
4525- start_addr = addr = TASK_UNMAPPED_BASE;
4526+ if (start_addr != mm->mmap_base) {
4527+ start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533- if (likely(!vma || addr + len <= vma->vm_start)) {
4534+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542- if ((flags & MAP_SHARED) &&
4543+ if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551- if (task_size - len >= addr &&
4552- (!vma || addr + len <= vma->vm_start))
4553+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561- if (!vma || addr <= vma->vm_start) {
4562+ if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570- addr = mm->mmap_base-len;
4571- if (do_color_align)
4572- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573+ addr = mm->mmap_base - len;
4574
4575 do {
4576+ if (do_color_align)
4577+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584- if (likely(!vma || addr+len <= vma->vm_start)) {
4585+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593- addr = vma->vm_start-len;
4594- if (do_color_align)
4595- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596- } while (likely(len < vma->vm_start));
4597+ addr = skip_heap_stack_gap(vma, len);
4598+ } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606+
4607+#ifdef CONFIG_PAX_RANDMMAP
4608+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4609+ mm->mmap_base += mm->delta_mmap;
4610+#endif
4611+
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619+
4620+#ifdef CONFIG_PAX_RANDMMAP
4621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4622+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623+#endif
4624+
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
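
[note] Throughout arch_get_unmapped_area() and the topdown variant above, the open-coded test !vma || addr + len <= vma->vm_start is replaced by check_heap_stack_gap(), and the downward search steps via skip_heap_stack_gap(). Both helpers are defined elsewhere in the patch; the following is only a rough sketch of the intended predicate, and the guard-gap size here is a made-up constant rather than the real sysctl:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_GUARD_GAP (64UL * 1024)	/* made-up gap kept below a stack mapping */
#define VM_GROWSDOWN 0x0100UL

struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

/* Sketch of check_heap_stack_gap(): [addr, addr+len) must end at or below the
 * next VMA, and keep an extra gap if that VMA is a downward-growing stack. */
static bool check_heap_stack_gap_sketch(const struct vma_sketch *vma,
					unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;			/* no mapping above: it fits */
	if (addr + len > vma->vm_start)
		return false;			/* would overlap the next VMA */
	if (vma->vm_flags & VM_GROWSDOWN)
		return addr + len + SKETCH_GUARD_GAP <= vma->vm_start;
	return true;
}

int main(void)
{
	struct vma_sketch stack = { 0x40000000UL, 0x40021000UL, VM_GROWSDOWN };

	printf("%d\n", check_heap_stack_gap_sketch(&stack, 0x3ffe0000UL, 0x10000));
	printf("%d\n", check_heap_stack_gap_sketch(&stack, 0x3fff8000UL, 0x10000));
	return 0;
}
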
4628diff -urNp linux-2.6.32.46/arch/sparc/kernel/traps_32.c linux-2.6.32.46/arch/sparc/kernel/traps_32.c
4629--- linux-2.6.32.46/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4630+++ linux-2.6.32.46/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4631@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635+extern void gr_handle_kernel_exploit(void);
4636+
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652- if(regs->psr & PSR_PS)
4653+ if(regs->psr & PSR_PS) {
4654+ gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656+ }
4657 do_exit(SIGSEGV);
4658 }
4659
4660diff -urNp linux-2.6.32.46/arch/sparc/kernel/traps_64.c linux-2.6.32.46/arch/sparc/kernel/traps_64.c
4661--- linux-2.6.32.46/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4662+++ linux-2.6.32.46/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4663@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676+
4677+#ifdef CONFIG_PAX_REFCOUNT
4678+ if (lvl == 6)
4679+ pax_report_refcount_overflow(regs);
4680+#endif
4681+
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689-
4690+
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695+#ifdef CONFIG_PAX_REFCOUNT
4696+ if (lvl == 6)
4697+ pax_report_refcount_overflow(regs);
4698+#endif
4699+
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707- printk("TPC<%pS>\n", (void *) regs->tpc);
4708+ printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758- printk(" [%016lx] %pS\n", pc, (void *) pc);
4759+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767+extern void gr_handle_kernel_exploit(void);
4768+
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785- if (regs->tstate & TSTATE_PRIV)
4786+ if (regs->tstate & TSTATE_PRIV) {
4787+ gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789+ }
4790+
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794diff -urNp linux-2.6.32.46/arch/sparc/kernel/una_asm_64.S linux-2.6.32.46/arch/sparc/kernel/una_asm_64.S
4795--- linux-2.6.32.46/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4796+++ linux-2.6.32.46/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4797@@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801- .size __do_int_load, .-__do_int_load
4802+ .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806diff -urNp linux-2.6.32.46/arch/sparc/kernel/unaligned_64.c linux-2.6.32.46/arch/sparc/kernel/unaligned_64.c
4807--- linux-2.6.32.46/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4808+++ linux-2.6.32.46/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4809@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818diff -urNp linux-2.6.32.46/arch/sparc/lib/atomic_64.S linux-2.6.32.46/arch/sparc/lib/atomic_64.S
4819--- linux-2.6.32.46/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4820+++ linux-2.6.32.46/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4821@@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825- add %g1, %o0, %g7
4826+ addcc %g1, %o0, %g7
4827+
4828+#ifdef CONFIG_PAX_REFCOUNT
4829+ tvs %icc, 6
4830+#endif
4831+
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839+ .globl atomic_add_unchecked
4840+ .type atomic_add_unchecked,#function
4841+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842+ BACKOFF_SETUP(%o2)
4843+1: lduw [%o1], %g1
4844+ add %g1, %o0, %g7
4845+ cas [%o1], %g1, %g7
4846+ cmp %g1, %g7
4847+ bne,pn %icc, 2f
4848+ nop
4849+ retl
4850+ nop
4851+2: BACKOFF_SPIN(%o2, %o3, 1b)
4852+ .size atomic_add_unchecked, .-atomic_add_unchecked
4853+
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859- sub %g1, %o0, %g7
4860+ subcc %g1, %o0, %g7
4861+
4862+#ifdef CONFIG_PAX_REFCOUNT
4863+ tvs %icc, 6
4864+#endif
4865+
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873+ .globl atomic_sub_unchecked
4874+ .type atomic_sub_unchecked,#function
4875+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876+ BACKOFF_SETUP(%o2)
4877+1: lduw [%o1], %g1
4878+ sub %g1, %o0, %g7
4879+ cas [%o1], %g1, %g7
4880+ cmp %g1, %g7
4881+ bne,pn %icc, 2f
4882+ nop
4883+ retl
4884+ nop
4885+2: BACKOFF_SPIN(%o2, %o3, 1b)
4886+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887+
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893- add %g1, %o0, %g7
4894+ addcc %g1, %o0, %g7
4895+
4896+#ifdef CONFIG_PAX_REFCOUNT
4897+ tvs %icc, 6
4898+#endif
4899+
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907+ .globl atomic_add_ret_unchecked
4908+ .type atomic_add_ret_unchecked,#function
4909+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910+ BACKOFF_SETUP(%o2)
4911+1: lduw [%o1], %g1
4912+ addcc %g1, %o0, %g7
4913+ cas [%o1], %g1, %g7
4914+ cmp %g1, %g7
4915+ bne,pn %icc, 2f
4916+ add %g7, %o0, %g7
4917+ sra %g7, 0, %o0
4918+ retl
4919+ nop
4920+2: BACKOFF_SPIN(%o2, %o3, 1b)
4921+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922+
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928- sub %g1, %o0, %g7
4929+ subcc %g1, %o0, %g7
4930+
4931+#ifdef CONFIG_PAX_REFCOUNT
4932+ tvs %icc, 6
4933+#endif
4934+
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942- add %g1, %o0, %g7
4943+ addcc %g1, %o0, %g7
4944+
4945+#ifdef CONFIG_PAX_REFCOUNT
4946+ tvs %xcc, 6
4947+#endif
4948+
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956+ .globl atomic64_add_unchecked
4957+ .type atomic64_add_unchecked,#function
4958+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959+ BACKOFF_SETUP(%o2)
4960+1: ldx [%o1], %g1
4961+ addcc %g1, %o0, %g7
4962+ casx [%o1], %g1, %g7
4963+ cmp %g1, %g7
4964+ bne,pn %xcc, 2f
4965+ nop
4966+ retl
4967+ nop
4968+2: BACKOFF_SPIN(%o2, %o3, 1b)
4969+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970+
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976- sub %g1, %o0, %g7
4977+ subcc %g1, %o0, %g7
4978+
4979+#ifdef CONFIG_PAX_REFCOUNT
4980+ tvs %xcc, 6
4981+#endif
4982+
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990+ .globl atomic64_sub_unchecked
4991+ .type atomic64_sub_unchecked,#function
4992+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993+ BACKOFF_SETUP(%o2)
4994+1: ldx [%o1], %g1
4995+ subcc %g1, %o0, %g7
4996+ casx [%o1], %g1, %g7
4997+ cmp %g1, %g7
4998+ bne,pn %xcc, 2f
4999+ nop
5000+ retl
5001+ nop
5002+2: BACKOFF_SPIN(%o2, %o3, 1b)
5003+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004+
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010- add %g1, %o0, %g7
5011+ addcc %g1, %o0, %g7
5012+
5013+#ifdef CONFIG_PAX_REFCOUNT
5014+ tvs %xcc, 6
5015+#endif
5016+
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024+ .globl atomic64_add_ret_unchecked
5025+ .type atomic64_add_ret_unchecked,#function
5026+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027+ BACKOFF_SETUP(%o2)
5028+1: ldx [%o1], %g1
5029+ addcc %g1, %o0, %g7
5030+ casx [%o1], %g1, %g7
5031+ cmp %g1, %g7
5032+ bne,pn %xcc, 2f
5033+ add %g7, %o0, %g7
5034+ mov %g7, %o0
5035+ retl
5036+ nop
5037+2: BACKOFF_SPIN(%o2, %o3, 1b)
5038+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039+
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045- sub %g1, %o0, %g7
5046+ subcc %g1, %o0, %g7
5047+
5048+#ifdef CONFIG_PAX_REFCOUNT
5049+ tvs %xcc, 6
5050+#endif
5051+
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
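
[note] With CONFIG_PAX_REFCOUNT the atomic add/sub above switch to addcc/subcc and arm tvs %icc, 6 (or %xcc for the 64-bit variants), so a signed overflow raises software trap 6 instead of silently wrapping, while the new *_unchecked entry points keep wrap-around semantics for counters that are allowed to wrap. In C the difference behaves roughly like the sketch below; the handler name is a stand-in and the cas retry loop (atomicity) is omitted:

#include <limits.h>
#include <stdio.h>

/* Stand-in for the trap-6 path that pax_report_refcount_overflow() services. */
static void refcount_overflow_trap(void)
{
	fprintf(stderr, "refcount overflow detected\n");
}

/* Checked add: signed overflow is detected (addcc + tvs in the real assembly). */
static int atomic_add_checked_sketch(int *v, int inc)
{
	int result;

	if (__builtin_add_overflow(*v, inc, &result))
		refcount_overflow_trap();	/* the real code traps rather than wrapping */
	else
		*v = result;
	return *v;
}

/* Unchecked add: plain two's-complement wrap, as in atomic_add_unchecked. */
static int atomic_add_unchecked_sketch(int *v, int inc)
{
	*v = (int)((unsigned int)*v + (unsigned int)inc);
	return *v;
}

int main(void)
{
	int a = INT_MAX, b = INT_MAX;

	atomic_add_checked_sketch(&a, 1);	/* reports the overflow */
	atomic_add_unchecked_sketch(&b, 1);	/* silently wraps to INT_MIN */
	printf("a=%d b=%d\n", a, b);
	return 0;
}
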
5055diff -urNp linux-2.6.32.46/arch/sparc/lib/ksyms.c linux-2.6.32.46/arch/sparc/lib/ksyms.c
5056--- linux-2.6.32.46/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
5057+++ linux-2.6.32.46/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
5058@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062+EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066+EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069+EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073+EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077diff -urNp linux-2.6.32.46/arch/sparc/lib/Makefile linux-2.6.32.46/arch/sparc/lib/Makefile
5078--- linux-2.6.32.46/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
5079+++ linux-2.6.32.46/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
5080@@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084-ccflags-y := -Werror
5085+#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089diff -urNp linux-2.6.32.46/arch/sparc/lib/rwsem_64.S linux-2.6.32.46/arch/sparc/lib/rwsem_64.S
5090--- linux-2.6.32.46/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
5091+++ linux-2.6.32.46/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
5092@@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096- add %g1, 1, %g7
5097+ addcc %g1, 1, %g7
5098+
5099+#ifdef CONFIG_PAX_REFCOUNT
5100+ tvs %icc, 6
5101+#endif
5102+
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106@@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110- add %g1, 1, %g7
5111+ addcc %g1, 1, %g7
5112+
5113+#ifdef CONFIG_PAX_REFCOUNT
5114+ tvs %icc, 6
5115+#endif
5116+
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120@@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124- add %g3, %g1, %g7
5125+ addcc %g3, %g1, %g7
5126+
5127+#ifdef CONFIG_PAX_REFCOUNT
5128+ tvs %icc, 6
5129+#endif
5130+
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134@@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138- add %g3, %g1, %g7
5139+ addcc %g3, %g1, %g7
5140+
5141+#ifdef CONFIG_PAX_REFCOUNT
5142+ tvs %icc, 6
5143+#endif
5144+
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148@@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152- sub %g1, 1, %g7
5153+ subcc %g1, 1, %g7
5154+
5155+#ifdef CONFIG_PAX_REFCOUNT
5156+ tvs %icc, 6
5157+#endif
5158+
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162@@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166- sub %g3, %g1, %g7
5167+ subcc %g3, %g1, %g7
5168+
5169+#ifdef CONFIG_PAX_REFCOUNT
5170+ tvs %icc, 6
5171+#endif
5172+
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176@@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180- sub %g3, %g1, %g7
5181+ subcc %g3, %g1, %g7
5182+
5183+#ifdef CONFIG_PAX_REFCOUNT
5184+ tvs %icc, 6
5185+#endif
5186+
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190diff -urNp linux-2.6.32.46/arch/sparc/Makefile linux-2.6.32.46/arch/sparc/Makefile
5191--- linux-2.6.32.46/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5192+++ linux-2.6.32.46/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5193@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202diff -urNp linux-2.6.32.46/arch/sparc/mm/fault_32.c linux-2.6.32.46/arch/sparc/mm/fault_32.c
5203--- linux-2.6.32.46/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5204+++ linux-2.6.32.46/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5205@@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209+#include <linux/slab.h>
5210+#include <linux/pagemap.h>
5211+#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219+#ifdef CONFIG_PAX_PAGEEXEC
5220+#ifdef CONFIG_PAX_DLRESOLVE
5221+static void pax_emuplt_close(struct vm_area_struct *vma)
5222+{
5223+ vma->vm_mm->call_dl_resolve = 0UL;
5224+}
5225+
5226+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227+{
5228+ unsigned int *kaddr;
5229+
5230+ vmf->page = alloc_page(GFP_HIGHUSER);
5231+ if (!vmf->page)
5232+ return VM_FAULT_OOM;
5233+
5234+ kaddr = kmap(vmf->page);
5235+ memset(kaddr, 0, PAGE_SIZE);
5236+ kaddr[0] = 0x9DE3BFA8U; /* save */
5237+ flush_dcache_page(vmf->page);
5238+ kunmap(vmf->page);
5239+ return VM_FAULT_MAJOR;
5240+}
5241+
5242+static const struct vm_operations_struct pax_vm_ops = {
5243+ .close = pax_emuplt_close,
5244+ .fault = pax_emuplt_fault
5245+};
5246+
5247+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248+{
5249+ int ret;
5250+
5251+ vma->vm_mm = current->mm;
5252+ vma->vm_start = addr;
5253+ vma->vm_end = addr + PAGE_SIZE;
5254+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256+ vma->vm_ops = &pax_vm_ops;
5257+
5258+ ret = insert_vm_struct(current->mm, vma);
5259+ if (ret)
5260+ return ret;
5261+
5262+ ++current->mm->total_vm;
5263+ return 0;
5264+}
5265+#endif
5266+
5267+/*
5268+ * PaX: decide what to do with offenders (regs->pc = fault address)
5269+ *
5270+ * returns 1 when task should be killed
5271+ * 2 when patched PLT trampoline was detected
5272+ * 3 when unpatched PLT trampoline was detected
5273+ */
5274+static int pax_handle_fetch_fault(struct pt_regs *regs)
5275+{
5276+
5277+#ifdef CONFIG_PAX_EMUPLT
5278+ int err;
5279+
5280+ do { /* PaX: patched PLT emulation #1 */
5281+ unsigned int sethi1, sethi2, jmpl;
5282+
5283+ err = get_user(sethi1, (unsigned int *)regs->pc);
5284+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286+
5287+ if (err)
5288+ break;
5289+
5290+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293+ {
5294+ unsigned int addr;
5295+
5296+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297+ addr = regs->u_regs[UREG_G1];
5298+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299+ regs->pc = addr;
5300+ regs->npc = addr+4;
5301+ return 2;
5302+ }
5303+ } while (0);
5304+
5305+ { /* PaX: patched PLT emulation #2 */
5306+ unsigned int ba;
5307+
5308+ err = get_user(ba, (unsigned int *)regs->pc);
5309+
5310+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311+ unsigned int addr;
5312+
5313+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314+ regs->pc = addr;
5315+ regs->npc = addr+4;
5316+ return 2;
5317+ }
5318+ }
5319+
5320+ do { /* PaX: patched PLT emulation #3 */
5321+ unsigned int sethi, jmpl, nop;
5322+
5323+ err = get_user(sethi, (unsigned int *)regs->pc);
5324+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326+
5327+ if (err)
5328+ break;
5329+
5330+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332+ nop == 0x01000000U)
5333+ {
5334+ unsigned int addr;
5335+
5336+ addr = (sethi & 0x003FFFFFU) << 10;
5337+ regs->u_regs[UREG_G1] = addr;
5338+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339+ regs->pc = addr;
5340+ regs->npc = addr+4;
5341+ return 2;
5342+ }
5343+ } while (0);
5344+
5345+ do { /* PaX: unpatched PLT emulation step 1 */
5346+ unsigned int sethi, ba, nop;
5347+
5348+ err = get_user(sethi, (unsigned int *)regs->pc);
5349+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351+
5352+ if (err)
5353+ break;
5354+
5355+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357+ nop == 0x01000000U)
5358+ {
5359+ unsigned int addr, save, call;
5360+
5361+ if ((ba & 0xFFC00000U) == 0x30800000U)
5362+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363+ else
5364+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365+
5366+ err = get_user(save, (unsigned int *)addr);
5367+ err |= get_user(call, (unsigned int *)(addr+4));
5368+ err |= get_user(nop, (unsigned int *)(addr+8));
5369+ if (err)
5370+ break;
5371+
5372+#ifdef CONFIG_PAX_DLRESOLVE
5373+ if (save == 0x9DE3BFA8U &&
5374+ (call & 0xC0000000U) == 0x40000000U &&
5375+ nop == 0x01000000U)
5376+ {
5377+ struct vm_area_struct *vma;
5378+ unsigned long call_dl_resolve;
5379+
5380+ down_read(&current->mm->mmap_sem);
5381+ call_dl_resolve = current->mm->call_dl_resolve;
5382+ up_read(&current->mm->mmap_sem);
5383+ if (likely(call_dl_resolve))
5384+ goto emulate;
5385+
5386+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387+
5388+ down_write(&current->mm->mmap_sem);
5389+ if (current->mm->call_dl_resolve) {
5390+ call_dl_resolve = current->mm->call_dl_resolve;
5391+ up_write(&current->mm->mmap_sem);
5392+ if (vma)
5393+ kmem_cache_free(vm_area_cachep, vma);
5394+ goto emulate;
5395+ }
5396+
5397+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399+ up_write(&current->mm->mmap_sem);
5400+ if (vma)
5401+ kmem_cache_free(vm_area_cachep, vma);
5402+ return 1;
5403+ }
5404+
5405+ if (pax_insert_vma(vma, call_dl_resolve)) {
5406+ up_write(&current->mm->mmap_sem);
5407+ kmem_cache_free(vm_area_cachep, vma);
5408+ return 1;
5409+ }
5410+
5411+ current->mm->call_dl_resolve = call_dl_resolve;
5412+ up_write(&current->mm->mmap_sem);
5413+
5414+emulate:
5415+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416+ regs->pc = call_dl_resolve;
5417+ regs->npc = addr+4;
5418+ return 3;
5419+ }
5420+#endif
5421+
5422+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423+ if ((save & 0xFFC00000U) == 0x05000000U &&
5424+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5425+ nop == 0x01000000U)
5426+ {
5427+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428+ regs->u_regs[UREG_G2] = addr + 4;
5429+ addr = (save & 0x003FFFFFU) << 10;
5430+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431+ regs->pc = addr;
5432+ regs->npc = addr+4;
5433+ return 3;
5434+ }
5435+ }
5436+ } while (0);
5437+
5438+ do { /* PaX: unpatched PLT emulation step 2 */
5439+ unsigned int save, call, nop;
5440+
5441+ err = get_user(save, (unsigned int *)(regs->pc-4));
5442+ err |= get_user(call, (unsigned int *)regs->pc);
5443+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444+ if (err)
5445+ break;
5446+
5447+ if (save == 0x9DE3BFA8U &&
5448+ (call & 0xC0000000U) == 0x40000000U &&
5449+ nop == 0x01000000U)
5450+ {
5451+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452+
5453+ regs->u_regs[UREG_RETPC] = regs->pc;
5454+ regs->pc = dl_resolve;
5455+ regs->npc = dl_resolve+4;
5456+ return 3;
5457+ }
5458+ } while (0);
5459+#endif
5460+
5461+ return 1;
5462+}
5463+
5464+void pax_report_insns(void *pc, void *sp)
5465+{
5466+ unsigned long i;
5467+
5468+ printk(KERN_ERR "PAX: bytes at PC: ");
5469+ for (i = 0; i < 8; i++) {
5470+ unsigned int c;
5471+ if (get_user(c, (unsigned int *)pc+i))
5472+ printk(KERN_CONT "???????? ");
5473+ else
5474+ printk(KERN_CONT "%08x ", c);
5475+ }
5476+ printk("\n");
5477+}
5478+#endif
5479+
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483@@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487+
5488+#ifdef CONFIG_PAX_PAGEEXEC
5489+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490+ up_read(&mm->mmap_sem);
5491+ switch (pax_handle_fetch_fault(regs)) {
5492+
5493+#ifdef CONFIG_PAX_EMUPLT
5494+ case 2:
5495+ case 3:
5496+ return;
5497+#endif
5498+
5499+ }
5500+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501+ do_group_exit(SIGKILL);
5502+ }
5503+#endif
5504+
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
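
[note] pax_handle_fetch_fault() above only lets an instruction fetch from a non-executable page proceed if it matches one of a few known PLT trampoline shapes, which it then emulates (return 2 or 3); anything else is treated as an exploit attempt (return 1) and reported. As a standalone example, the decoder below recovers the jump target from the sethi/jmpl/nop shape handled by "patched PLT emulation #3", using the same masks as the code above; the sample encoding in main() is made up for the demonstration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decode the sethi/jmpl/nop trampoline recognised as "patched PLT emulation #3".
 * Returns true and the branch target when the three words match that shape. */
static bool decode_plt3(uint32_t sethi, uint32_t jmpl, uint32_t nop, uint32_t *target)
{
	if ((sethi & 0xFFC00000U) != 0x03000000U ||	/* sethi %hi(x), %g1 */
	    (jmpl  & 0xFFFFE000U) != 0x81C06000U ||	/* jmpl  %g1 + simm13, %g0 */
	    nop != 0x01000000U)
		return false;

	uint32_t hi22   = (sethi & 0x003FFFFFU) << 10;		    /* %hi(x): upper 22 bits */
	int32_t  simm13 = (int32_t)((jmpl & 0x1FFFU) << 19) >> 19; /* sign-extend low 13 bits */

	*target = hi22 + (uint32_t)simm13;
	return true;
}

int main(void)
{
	uint32_t target;

	/* sethi %hi(0x12345400), %g1 ; jmpl %g1 + 0x2a8, %g0 ; nop */
	if (decode_plt3(0x03000000U | (0x12345400U >> 10),
			0x81C06000U | 0x2A8U, 0x01000000U, &target))
		printf("emulated jump to %#x\n", (unsigned)target);
	return 0;
}
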
5508diff -urNp linux-2.6.32.46/arch/sparc/mm/fault_64.c linux-2.6.32.46/arch/sparc/mm/fault_64.c
5509--- linux-2.6.32.46/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5510+++ linux-2.6.32.46/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5511@@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515+#include <linux/slab.h>
5516+#include <linux/pagemap.h>
5517+#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534+#ifdef CONFIG_PAX_PAGEEXEC
5535+#ifdef CONFIG_PAX_DLRESOLVE
5536+static void pax_emuplt_close(struct vm_area_struct *vma)
5537+{
5538+ vma->vm_mm->call_dl_resolve = 0UL;
5539+}
5540+
5541+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542+{
5543+ unsigned int *kaddr;
5544+
5545+ vmf->page = alloc_page(GFP_HIGHUSER);
5546+ if (!vmf->page)
5547+ return VM_FAULT_OOM;
5548+
5549+ kaddr = kmap(vmf->page);
5550+ memset(kaddr, 0, PAGE_SIZE);
5551+ kaddr[0] = 0x9DE3BFA8U; /* save */
5552+ flush_dcache_page(vmf->page);
5553+ kunmap(vmf->page);
5554+ return VM_FAULT_MAJOR;
5555+}
5556+
5557+static const struct vm_operations_struct pax_vm_ops = {
5558+ .close = pax_emuplt_close,
5559+ .fault = pax_emuplt_fault
5560+};
5561+
5562+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563+{
5564+ int ret;
5565+
5566+ vma->vm_mm = current->mm;
5567+ vma->vm_start = addr;
5568+ vma->vm_end = addr + PAGE_SIZE;
5569+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571+ vma->vm_ops = &pax_vm_ops;
5572+
5573+ ret = insert_vm_struct(current->mm, vma);
5574+ if (ret)
5575+ return ret;
5576+
5577+ ++current->mm->total_vm;
5578+ return 0;
5579+}
5580+#endif
5581+
5582+/*
5583+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5584+ *
5585+ * returns 1 when task should be killed
5586+ * 2 when patched PLT trampoline was detected
5587+ * 3 when unpatched PLT trampoline was detected
5588+ */
5589+static int pax_handle_fetch_fault(struct pt_regs *regs)
5590+{
5591+
5592+#ifdef CONFIG_PAX_EMUPLT
5593+ int err;
5594+
5595+ do { /* PaX: patched PLT emulation #1 */
5596+ unsigned int sethi1, sethi2, jmpl;
5597+
5598+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5599+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601+
5602+ if (err)
5603+ break;
5604+
5605+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608+ {
5609+ unsigned long addr;
5610+
5611+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612+ addr = regs->u_regs[UREG_G1];
5613+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614+
5615+ if (test_thread_flag(TIF_32BIT))
5616+ addr &= 0xFFFFFFFFUL;
5617+
5618+ regs->tpc = addr;
5619+ regs->tnpc = addr+4;
5620+ return 2;
5621+ }
5622+ } while (0);
5623+
5624+ { /* PaX: patched PLT emulation #2 */
5625+ unsigned int ba;
5626+
5627+ err = get_user(ba, (unsigned int *)regs->tpc);
5628+
5629+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630+ unsigned long addr;
5631+
5632+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633+
5634+ if (test_thread_flag(TIF_32BIT))
5635+ addr &= 0xFFFFFFFFUL;
5636+
5637+ regs->tpc = addr;
5638+ regs->tnpc = addr+4;
5639+ return 2;
5640+ }
5641+ }
5642+
5643+ do { /* PaX: patched PLT emulation #3 */
5644+ unsigned int sethi, jmpl, nop;
5645+
5646+ err = get_user(sethi, (unsigned int *)regs->tpc);
5647+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649+
5650+ if (err)
5651+ break;
5652+
5653+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655+ nop == 0x01000000U)
5656+ {
5657+ unsigned long addr;
5658+
5659+ addr = (sethi & 0x003FFFFFU) << 10;
5660+ regs->u_regs[UREG_G1] = addr;
5661+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662+
5663+ if (test_thread_flag(TIF_32BIT))
5664+ addr &= 0xFFFFFFFFUL;
5665+
5666+ regs->tpc = addr;
5667+ regs->tnpc = addr+4;
5668+ return 2;
5669+ }
5670+ } while (0);
5671+
5672+ do { /* PaX: patched PLT emulation #4 */
5673+ unsigned int sethi, mov1, call, mov2;
5674+
5675+ err = get_user(sethi, (unsigned int *)regs->tpc);
5676+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679+
5680+ if (err)
5681+ break;
5682+
5683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684+ mov1 == 0x8210000FU &&
5685+ (call & 0xC0000000U) == 0x40000000U &&
5686+ mov2 == 0x9E100001U)
5687+ {
5688+ unsigned long addr;
5689+
5690+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692+
5693+ if (test_thread_flag(TIF_32BIT))
5694+ addr &= 0xFFFFFFFFUL;
5695+
5696+ regs->tpc = addr;
5697+ regs->tnpc = addr+4;
5698+ return 2;
5699+ }
5700+ } while (0);
5701+
5702+ do { /* PaX: patched PLT emulation #5 */
5703+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704+
5705+ err = get_user(sethi, (unsigned int *)regs->tpc);
5706+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713+
5714+ if (err)
5715+ break;
5716+
5717+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5721+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722+ sllx == 0x83287020U &&
5723+ jmpl == 0x81C04005U &&
5724+ nop == 0x01000000U)
5725+ {
5726+ unsigned long addr;
5727+
5728+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729+ regs->u_regs[UREG_G1] <<= 32;
5730+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732+ regs->tpc = addr;
5733+ regs->tnpc = addr+4;
5734+ return 2;
5735+ }
5736+ } while (0);
5737+
5738+ do { /* PaX: patched PLT emulation #6 */
5739+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740+
5741+ err = get_user(sethi, (unsigned int *)regs->tpc);
5742+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748+
5749+ if (err)
5750+ break;
5751+
5752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755+ sllx == 0x83287020U &&
5756+ (or & 0xFFFFE000U) == 0x8A116000U &&
5757+ jmpl == 0x81C04005U &&
5758+ nop == 0x01000000U)
5759+ {
5760+ unsigned long addr;
5761+
5762+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763+ regs->u_regs[UREG_G1] <<= 32;
5764+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766+ regs->tpc = addr;
5767+ regs->tnpc = addr+4;
5768+ return 2;
5769+ }
5770+ } while (0);
5771+
5772+ do { /* PaX: unpatched PLT emulation step 1 */
5773+ unsigned int sethi, ba, nop;
5774+
5775+ err = get_user(sethi, (unsigned int *)regs->tpc);
5776+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778+
5779+ if (err)
5780+ break;
5781+
5782+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784+ nop == 0x01000000U)
5785+ {
5786+ unsigned long addr;
5787+ unsigned int save, call;
5788+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789+
5790+ if ((ba & 0xFFC00000U) == 0x30800000U)
5791+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792+ else
5793+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794+
5795+ if (test_thread_flag(TIF_32BIT))
5796+ addr &= 0xFFFFFFFFUL;
5797+
5798+ err = get_user(save, (unsigned int *)addr);
5799+ err |= get_user(call, (unsigned int *)(addr+4));
5800+ err |= get_user(nop, (unsigned int *)(addr+8));
5801+ if (err)
5802+ break;
5803+
5804+#ifdef CONFIG_PAX_DLRESOLVE
5805+ if (save == 0x9DE3BFA8U &&
5806+ (call & 0xC0000000U) == 0x40000000U &&
5807+ nop == 0x01000000U)
5808+ {
5809+ struct vm_area_struct *vma;
5810+ unsigned long call_dl_resolve;
5811+
5812+ down_read(&current->mm->mmap_sem);
5813+ call_dl_resolve = current->mm->call_dl_resolve;
5814+ up_read(&current->mm->mmap_sem);
5815+ if (likely(call_dl_resolve))
5816+ goto emulate;
5817+
5818+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819+
5820+ down_write(&current->mm->mmap_sem);
5821+ if (current->mm->call_dl_resolve) {
5822+ call_dl_resolve = current->mm->call_dl_resolve;
5823+ up_write(&current->mm->mmap_sem);
5824+ if (vma)
5825+ kmem_cache_free(vm_area_cachep, vma);
5826+ goto emulate;
5827+ }
5828+
5829+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831+ up_write(&current->mm->mmap_sem);
5832+ if (vma)
5833+ kmem_cache_free(vm_area_cachep, vma);
5834+ return 1;
5835+ }
5836+
5837+ if (pax_insert_vma(vma, call_dl_resolve)) {
5838+ up_write(&current->mm->mmap_sem);
5839+ kmem_cache_free(vm_area_cachep, vma);
5840+ return 1;
5841+ }
5842+
5843+ current->mm->call_dl_resolve = call_dl_resolve;
5844+ up_write(&current->mm->mmap_sem);
5845+
5846+emulate:
5847+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848+ regs->tpc = call_dl_resolve;
5849+ regs->tnpc = addr+4;
5850+ return 3;
5851+ }
5852+#endif
5853+
5854+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855+ if ((save & 0xFFC00000U) == 0x05000000U &&
5856+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5857+ nop == 0x01000000U)
5858+ {
5859+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860+ regs->u_regs[UREG_G2] = addr + 4;
5861+ addr = (save & 0x003FFFFFU) << 10;
5862+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863+
5864+ if (test_thread_flag(TIF_32BIT))
5865+ addr &= 0xFFFFFFFFUL;
5866+
5867+ regs->tpc = addr;
5868+ regs->tnpc = addr+4;
5869+ return 3;
5870+ }
5871+
5872+ /* PaX: 64-bit PLT stub */
5873+ err = get_user(sethi1, (unsigned int *)addr);
5874+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5875+ err |= get_user(or1, (unsigned int *)(addr+8));
5876+ err |= get_user(or2, (unsigned int *)(addr+12));
5877+ err |= get_user(sllx, (unsigned int *)(addr+16));
5878+ err |= get_user(add, (unsigned int *)(addr+20));
5879+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5880+ err |= get_user(nop, (unsigned int *)(addr+28));
5881+ if (err)
5882+ break;
5883+
5884+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5887+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888+ sllx == 0x89293020U &&
5889+ add == 0x8A010005U &&
5890+ jmpl == 0x89C14000U &&
5891+ nop == 0x01000000U)
5892+ {
5893+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895+ regs->u_regs[UREG_G4] <<= 32;
5896+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898+ regs->u_regs[UREG_G4] = addr + 24;
5899+ addr = regs->u_regs[UREG_G5];
5900+ regs->tpc = addr;
5901+ regs->tnpc = addr+4;
5902+ return 3;
5903+ }
5904+ }
5905+ } while (0);
5906+
5907+#ifdef CONFIG_PAX_DLRESOLVE
5908+ do { /* PaX: unpatched PLT emulation step 2 */
5909+ unsigned int save, call, nop;
5910+
5911+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5912+ err |= get_user(call, (unsigned int *)regs->tpc);
5913+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914+ if (err)
5915+ break;
5916+
5917+ if (save == 0x9DE3BFA8U &&
5918+ (call & 0xC0000000U) == 0x40000000U &&
5919+ nop == 0x01000000U)
5920+ {
5921+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922+
5923+ if (test_thread_flag(TIF_32BIT))
5924+ dl_resolve &= 0xFFFFFFFFUL;
5925+
5926+ regs->u_regs[UREG_RETPC] = regs->tpc;
5927+ regs->tpc = dl_resolve;
5928+ regs->tnpc = dl_resolve+4;
5929+ return 3;
5930+ }
5931+ } while (0);
5932+#endif
5933+
5934+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935+ unsigned int sethi, ba, nop;
5936+
5937+ err = get_user(sethi, (unsigned int *)regs->tpc);
5938+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940+
5941+ if (err)
5942+ break;
5943+
5944+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945+ (ba & 0xFFF00000U) == 0x30600000U &&
5946+ nop == 0x01000000U)
5947+ {
5948+ unsigned long addr;
5949+
5950+ addr = (sethi & 0x003FFFFFU) << 10;
5951+ regs->u_regs[UREG_G1] = addr;
5952+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953+
5954+ if (test_thread_flag(TIF_32BIT))
5955+ addr &= 0xFFFFFFFFUL;
5956+
5957+ regs->tpc = addr;
5958+ regs->tnpc = addr+4;
5959+ return 2;
5960+ }
5961+ } while (0);
5962+
5963+#endif
5964+
5965+ return 1;
5966+}
5967+
5968+void pax_report_insns(void *pc, void *sp)
5969+{
5970+ unsigned long i;
5971+
5972+ printk(KERN_ERR "PAX: bytes at PC: ");
5973+ for (i = 0; i < 8; i++) {
5974+ unsigned int c;
5975+ if (get_user(c, (unsigned int *)pc+i))
5976+ printk(KERN_CONT "???????? ");
5977+ else
5978+ printk(KERN_CONT "%08x ", c);
5979+ }
5980+ printk("\n");
5981+}
5982+#endif
5983+
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991+#ifdef CONFIG_PAX_PAGEEXEC
5992+ /* PaX: detect ITLB misses on non-exec pages */
5993+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995+ {
5996+ if (address != regs->tpc)
5997+ goto good_area;
5998+
5999+ up_read(&mm->mmap_sem);
6000+ switch (pax_handle_fetch_fault(regs)) {
6001+
6002+#ifdef CONFIG_PAX_EMUPLT
6003+ case 2:
6004+ case 3:
6005+ return;
6006+#endif
6007+
6008+ }
6009+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010+ do_group_exit(SIGKILL);
6011+ }
6012+#endif
6013+
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
6017diff -urNp linux-2.6.32.46/arch/sparc/mm/hugetlbpage.c linux-2.6.32.46/arch/sparc/mm/hugetlbpage.c
6018--- linux-2.6.32.46/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
6019+++ linux-2.6.32.46/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
6020@@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024- if (likely(!vma || addr + len <= vma->vm_start)) {
6025+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033- if (!vma || addr <= vma->vm_start) {
6034+ if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042- addr = (mm->mmap_base-len) & HPAGE_MASK;
6043+ addr = mm->mmap_base - len;
6044
6045 do {
6046+ addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053- if (likely(!vma || addr+len <= vma->vm_start)) {
6054+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062- addr = (vma->vm_start-len) & HPAGE_MASK;
6063- } while (likely(len < vma->vm_start));
6064+ addr = skip_heap_stack_gap(vma, len);
6065+ } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073- if (task_size - len >= addr &&
6074- (!vma || addr + len <= vma->vm_start))
6075+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079diff -urNp linux-2.6.32.46/arch/sparc/mm/init_32.c linux-2.6.32.46/arch/sparc/mm/init_32.c
6080--- linux-2.6.32.46/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
6081+++ linux-2.6.32.46/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
6082@@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088+
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092@@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096- protection_map[1] = PAGE_READONLY;
6097- protection_map[2] = PAGE_COPY;
6098- protection_map[3] = PAGE_COPY;
6099+ protection_map[1] = PAGE_READONLY_NOEXEC;
6100+ protection_map[2] = PAGE_COPY_NOEXEC;
6101+ protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107- protection_map[9] = PAGE_READONLY;
6108- protection_map[10] = PAGE_SHARED;
6109- protection_map[11] = PAGE_SHARED;
6110+ protection_map[9] = PAGE_READONLY_NOEXEC;
6111+ protection_map[10] = PAGE_SHARED_NOEXEC;
6112+ protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116diff -urNp linux-2.6.32.46/arch/sparc/mm/Makefile linux-2.6.32.46/arch/sparc/mm/Makefile
6117--- linux-2.6.32.46/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6118+++ linux-2.6.32.46/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6119@@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123-ccflags-y := -Werror
6124+#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128diff -urNp linux-2.6.32.46/arch/sparc/mm/srmmu.c linux-2.6.32.46/arch/sparc/mm/srmmu.c
6129--- linux-2.6.32.46/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6130+++ linux-2.6.32.46/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6131@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135+
6136+#ifdef CONFIG_PAX_PAGEEXEC
6137+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140+#endif
6141+
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145diff -urNp linux-2.6.32.46/arch/um/include/asm/kmap_types.h linux-2.6.32.46/arch/um/include/asm/kmap_types.h
6146--- linux-2.6.32.46/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6147+++ linux-2.6.32.46/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6148@@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152+ KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156diff -urNp linux-2.6.32.46/arch/um/include/asm/page.h linux-2.6.32.46/arch/um/include/asm/page.h
6157--- linux-2.6.32.46/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6158+++ linux-2.6.32.46/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6159@@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163+#define ktla_ktva(addr) (addr)
6164+#define ktva_ktla(addr) (addr)
6165+
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169diff -urNp linux-2.6.32.46/arch/um/kernel/process.c linux-2.6.32.46/arch/um/kernel/process.c
6170--- linux-2.6.32.46/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6171+++ linux-2.6.32.46/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6172@@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176-/*
6177- * Only x86 and x86_64 have an arch_align_stack().
6178- * All other arches have "#define arch_align_stack(x) (x)"
6179- * in their asm/system.h
6180- * As this is included in UML from asm-um/system-generic.h,
6181- * we can use it to behave as the subarch does.
6182- */
6183-#ifndef arch_align_stack
6184-unsigned long arch_align_stack(unsigned long sp)
6185-{
6186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187- sp -= get_random_int() % 8192;
6188- return sp & ~0xf;
6189-}
6190-#endif
6191-
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195diff -urNp linux-2.6.32.46/arch/um/sys-i386/syscalls.c linux-2.6.32.46/arch/um/sys-i386/syscalls.c
6196--- linux-2.6.32.46/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6197+++ linux-2.6.32.46/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6198@@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203+{
6204+ unsigned long pax_task_size = TASK_SIZE;
6205+
6206+#ifdef CONFIG_PAX_SEGMEXEC
6207+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208+ pax_task_size = SEGMEXEC_TASK_SIZE;
6209+#endif
6210+
6211+ if (len > pax_task_size || addr > pax_task_size - len)
6212+ return -EINVAL;
6213+
6214+ return 0;
6215+}
6216+
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220diff -urNp linux-2.6.32.46/arch/x86/boot/bitops.h linux-2.6.32.46/arch/x86/boot/bitops.h
6221--- linux-2.6.32.46/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6222+++ linux-2.6.32.46/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6223@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241diff -urNp linux-2.6.32.46/arch/x86/boot/boot.h linux-2.6.32.46/arch/x86/boot/boot.h
6242--- linux-2.6.32.46/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6243+++ linux-2.6.32.46/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6244@@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248- asm("movw %%ds,%0" : "=rm" (seg));
6249+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257- asm("repe; cmpsb; setnz %0"
6258+ asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262diff -urNp linux-2.6.32.46/arch/x86/boot/compressed/head_32.S linux-2.6.32.46/arch/x86/boot/compressed/head_32.S
6263--- linux-2.6.32.46/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6264+++ linux-2.6.32.46/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6265@@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269- movl $LOAD_PHYSICAL_ADDR, %ebx
6270+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274@@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278- subl $LOAD_PHYSICAL_ADDR, %ebx
6279+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283@@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287- testl %ecx, %ecx
6288- jz 2f
6289+ jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293diff -urNp linux-2.6.32.46/arch/x86/boot/compressed/head_64.S linux-2.6.32.46/arch/x86/boot/compressed/head_64.S
6294--- linux-2.6.32.46/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6295+++ linux-2.6.32.46/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6296@@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300- movl $LOAD_PHYSICAL_ADDR, %ebx
6301+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305@@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309-#include "../../kernel/verify_cpu_64.S"
6310+#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314@@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318- movq $LOAD_PHYSICAL_ADDR, %rbp
6319+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323diff -urNp linux-2.6.32.46/arch/x86/boot/compressed/Makefile linux-2.6.32.46/arch/x86/boot/compressed/Makefile
6324--- linux-2.6.32.46/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6325+++ linux-2.6.32.46/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6326@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330+ifdef CONSTIFY_PLUGIN
6331+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332+endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336diff -urNp linux-2.6.32.46/arch/x86/boot/compressed/misc.c linux-2.6.32.46/arch/x86/boot/compressed/misc.c
6337--- linux-2.6.32.46/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6338+++ linux-2.6.32.46/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6339@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357diff -urNp linux-2.6.32.46/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.46/arch/x86/boot/compressed/mkpiggy.c
6358--- linux-2.6.32.46/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6359+++ linux-2.6.32.46/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6360@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365+ offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369diff -urNp linux-2.6.32.46/arch/x86/boot/compressed/relocs.c linux-2.6.32.46/arch/x86/boot/compressed/relocs.c
6370--- linux-2.6.32.46/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6371+++ linux-2.6.32.46/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6372@@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376+#include "../../../../include/linux/autoconf.h"
6377+
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380+static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388- int i;
6389+ unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397+static void read_phdrs(FILE *fp)
6398+{
6399+ unsigned int i;
6400+
6401+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402+ if (!phdr) {
6403+ die("Unable to allocate %d program headers\n",
6404+ ehdr.e_phnum);
6405+ }
6406+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407+ die("Seek to %d failed: %s\n",
6408+ ehdr.e_phoff, strerror(errno));
6409+ }
6410+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411+ die("Cannot read ELF program headers: %s\n",
6412+ strerror(errno));
6413+ }
6414+ for(i = 0; i < ehdr.e_phnum; i++) {
6415+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423+ }
6424+
6425+}
6426+
6427 static void read_shdrs(FILE *fp)
6428 {
6429- int i;
6430+ unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438- int i;
6439+ unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447- int i,j;
6448+ unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456- int i,j;
6457+ unsigned int i,j;
6458+ uint32_t base;
6459+
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467+ base = 0;
6468+ for (j = 0; j < ehdr.e_phnum; j++) {
6469+ if (phdr[j].p_type != PT_LOAD )
6470+ continue;
6471+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472+ continue;
6473+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474+ break;
6475+ }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478- rel->r_offset = elf32_to_cpu(rel->r_offset);
6479+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487- int i;
6488+ unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495- int j;
6496+ unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504- int i, printed = 0;
6505+ unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512- int j;
6513+ unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521- int i;
6522+ unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528- int j;
6529+ unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539+ continue;
6540+
6541+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544+ continue;
6545+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546+ continue;
6547+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548+ continue;
6549+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550+ continue;
6551+#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559- int i;
6560+ unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568+ read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572diff -urNp linux-2.6.32.46/arch/x86/boot/cpucheck.c linux-2.6.32.46/arch/x86/boot/cpucheck.c
6573--- linux-2.6.32.46/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6574+++ linux-2.6.32.46/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6575@@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579- asm("movl %%cr0,%0" : "=r" (cr0));
6580+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588- asm("pushfl ; "
6589+ asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593@@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597- asm("cpuid"
6598+ asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602@@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606- asm("cpuid"
6607+ asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611@@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615- asm("cpuid"
6616+ asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620@@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624- asm("cpuid"
6625+ asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659- asm("cpuid"
6660+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662+ asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670diff -urNp linux-2.6.32.46/arch/x86/boot/header.S linux-2.6.32.46/arch/x86/boot/header.S
6671--- linux-2.6.32.46/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6672+++ linux-2.6.32.46/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6673@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682diff -urNp linux-2.6.32.46/arch/x86/boot/Makefile linux-2.6.32.46/arch/x86/boot/Makefile
6683--- linux-2.6.32.46/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6684+++ linux-2.6.32.46/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6685@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689+ifdef CONSTIFY_PLUGIN
6690+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691+endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695diff -urNp linux-2.6.32.46/arch/x86/boot/memory.c linux-2.6.32.46/arch/x86/boot/memory.c
6696--- linux-2.6.32.46/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6697+++ linux-2.6.32.46/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6698@@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702- int count = 0;
6703+ unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707diff -urNp linux-2.6.32.46/arch/x86/boot/video.c linux-2.6.32.46/arch/x86/boot/video.c
6708--- linux-2.6.32.46/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6709+++ linux-2.6.32.46/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6710@@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714- int i, len = 0;
6715+ unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719diff -urNp linux-2.6.32.46/arch/x86/boot/video-vesa.c linux-2.6.32.46/arch/x86/boot/video-vesa.c
6720--- linux-2.6.32.46/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6721+++ linux-2.6.32.46/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6722@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726+ boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730diff -urNp linux-2.6.32.46/arch/x86/ia32/ia32_aout.c linux-2.6.32.46/arch/x86/ia32/ia32_aout.c
6731--- linux-2.6.32.46/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6732+++ linux-2.6.32.46/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6733@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6734 unsigned long dump_start, dump_size;
6735 struct user32 dump;
6736
6737+ memset(&dump, 0, sizeof(dump));
6738+
6739 fs = get_fs();
6740 set_fs(KERNEL_DS);
6741 has_dumped = 1;
6742@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6743 dump_size = dump.u_ssize << PAGE_SHIFT;
6744 DUMP_WRITE(dump_start, dump_size);
6745 }
6746- /*
6747- * Finally dump the task struct. Not be used by gdb, but
6748- * could be useful
6749- */
6750- set_fs(KERNEL_DS);
6751- DUMP_WRITE(current, sizeof(*current));
6752 end_coredump:
6753 set_fs(fs);
6754 return has_dumped;
6755diff -urNp linux-2.6.32.46/arch/x86/ia32/ia32entry.S linux-2.6.32.46/arch/x86/ia32/ia32entry.S
6756--- linux-2.6.32.46/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6757+++ linux-2.6.32.46/arch/x86/ia32/ia32entry.S 2011-08-25 17:42:18.000000000 -0400
6758@@ -13,6 +13,7 @@
6759 #include <asm/thread_info.h>
6760 #include <asm/segment.h>
6761 #include <asm/irqflags.h>
6762+#include <asm/pgtable.h>
6763 #include <linux/linkage.h>
6764
6765 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6766@@ -93,6 +94,29 @@ ENTRY(native_irq_enable_sysexit)
6767 ENDPROC(native_irq_enable_sysexit)
6768 #endif
6769
6770+ .macro pax_enter_kernel_user
6771+#ifdef CONFIG_PAX_MEMORY_UDEREF
6772+ call pax_enter_kernel_user
6773+#endif
6774+ .endm
6775+
6776+ .macro pax_exit_kernel_user
6777+#ifdef CONFIG_PAX_MEMORY_UDEREF
6778+ call pax_exit_kernel_user
6779+#endif
6780+#ifdef CONFIG_PAX_RANDKSTACK
6781+ pushq %rax
6782+ call pax_randomize_kstack
6783+ popq %rax
6784+#endif
6785+ .endm
6786+
6787+.macro pax_erase_kstack
6788+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6789+ call pax_erase_kstack
6790+#endif
6791+.endm
6792+
6793 /*
6794 * 32bit SYSENTER instruction entry.
6795 *
6796@@ -119,7 +143,7 @@ ENTRY(ia32_sysenter_target)
6797 CFI_REGISTER rsp,rbp
6798 SWAPGS_UNSAFE_STACK
6799 movq PER_CPU_VAR(kernel_stack), %rsp
6800- addq $(KERNEL_STACK_OFFSET),%rsp
6801+ pax_enter_kernel_user
6802 /*
6803 * No need to follow this irqs on/off section: the syscall
6804 * disabled irqs, here we enable it straight after entry:
6805@@ -135,7 +159,8 @@ ENTRY(ia32_sysenter_target)
6806 pushfq
6807 CFI_ADJUST_CFA_OFFSET 8
6808 /*CFI_REL_OFFSET rflags,0*/
6809- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6810+ GET_THREAD_INFO(%r10)
6811+ movl TI_sysenter_return(%r10), %r10d
6812 CFI_REGISTER rip,r10
6813 pushq $__USER32_CS
6814 CFI_ADJUST_CFA_OFFSET 8
6815@@ -150,6 +175,12 @@ ENTRY(ia32_sysenter_target)
6816 SAVE_ARGS 0,0,1
6817 /* no need to do an access_ok check here because rbp has been
6818 32bit zero extended */
6819+
6820+#ifdef CONFIG_PAX_MEMORY_UDEREF
6821+ mov $PAX_USER_SHADOW_BASE,%r10
6822+ add %r10,%rbp
6823+#endif
6824+
6825 1: movl (%rbp),%ebp
6826 .section __ex_table,"a"
6827 .quad 1b,ia32_badarg
6828@@ -172,6 +203,8 @@ sysenter_dispatch:
6829 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6830 jnz sysexit_audit
6831 sysexit_from_sys_call:
6832+ pax_exit_kernel_user
6833+ pax_erase_kstack
6834 andl $~TS_COMPAT,TI_status(%r10)
6835 /* clear IF, that popfq doesn't enable interrupts early */
6836 andl $~0x200,EFLAGS-R11(%rsp)
6837@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6838 movl %eax,%esi /* 2nd arg: syscall number */
6839 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6840 call audit_syscall_entry
6841+
6842+ pax_erase_kstack
6843+
6844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6845 cmpq $(IA32_NR_syscalls-1),%rax
6846 ja ia32_badsys
6847@@ -252,6 +288,9 @@ sysenter_tracesys:
6848 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6849 movq %rsp,%rdi /* &pt_regs -> arg1 */
6850 call syscall_trace_enter
6851+
6852+ pax_erase_kstack
6853+
6854 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6855 RESTORE_REST
6856 cmpq $(IA32_NR_syscalls-1),%rax
6857@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6858 ENTRY(ia32_cstar_target)
6859 CFI_STARTPROC32 simple
6860 CFI_SIGNAL_FRAME
6861- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6862+ CFI_DEF_CFA rsp,0
6863 CFI_REGISTER rip,rcx
6864 /*CFI_REGISTER rflags,r11*/
6865 SWAPGS_UNSAFE_STACK
6866 movl %esp,%r8d
6867 CFI_REGISTER rsp,r8
6868 movq PER_CPU_VAR(kernel_stack),%rsp
6869+
6870+#ifdef CONFIG_PAX_MEMORY_UDEREF
6871+ pax_enter_kernel_user
6872+#endif
6873+
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,1,1
6880+ SAVE_ARGS 8*6,1,1
6881 movl %eax,%eax /* zero extension */
6882 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6883 movq %rcx,RIP-ARGOFFSET(%rsp)
6884@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6885 /* no need to do an access_ok check here because r8 has been
6886 32bit zero extended */
6887 /* hardware stack frame is complete now */
6888+
6889+#ifdef CONFIG_PAX_MEMORY_UDEREF
6890+ mov $PAX_USER_SHADOW_BASE,%r10
6891+ add %r10,%r8
6892+#endif
6893+
6894 1: movl (%r8),%r9d
6895 .section __ex_table,"a"
6896 .quad 1b,ia32_badarg
6897@@ -333,6 +383,8 @@ cstar_dispatch:
6898 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6899 jnz sysretl_audit
6900 sysretl_from_sys_call:
6901+ pax_exit_kernel_user
6902+ pax_erase_kstack
6903 andl $~TS_COMPAT,TI_status(%r10)
6904 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6905 movl RIP-ARGOFFSET(%rsp),%ecx
6906@@ -370,6 +422,9 @@ cstar_tracesys:
6907 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6908 movq %rsp,%rdi /* &pt_regs -> arg1 */
6909 call syscall_trace_enter
6910+
6911+ pax_erase_kstack
6912+
6913 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6914 RESTORE_REST
6915 xchgl %ebp,%r9d
6916@@ -415,6 +470,7 @@ ENTRY(ia32_syscall)
6917 CFI_REL_OFFSET rip,RIP-RIP
6918 PARAVIRT_ADJUST_EXCEPTION_FRAME
6919 SWAPGS
6920+ pax_enter_kernel_user
6921 /*
6922 * No need to follow this irqs on/off section: the syscall
6923 * disabled irqs and here we enable it straight after entry:
6924@@ -448,6 +504,9 @@ ia32_tracesys:
6925 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6926 movq %rsp,%rdi /* &pt_regs -> arg1 */
6927 call syscall_trace_enter
6928+
6929+ pax_erase_kstack
6930+
6931 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6932 RESTORE_REST
6933 cmpq $(IA32_NR_syscalls-1),%rax
6934diff -urNp linux-2.6.32.46/arch/x86/ia32/ia32_signal.c linux-2.6.32.46/arch/x86/ia32/ia32_signal.c
6935--- linux-2.6.32.46/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6936+++ linux-2.6.32.46/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6937@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6938 sp -= frame_size;
6939 /* Align the stack pointer according to the i386 ABI,
6940 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6941- sp = ((sp + 4) & -16ul) - 4;
6942+ sp = ((sp - 12) & -16ul) - 4;
6943 return (void __user *) sp;
6944 }
6945
6946@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6947 * These are actually not used anymore, but left because some
6948 * gdb versions depend on them as a marker.
6949 */
6950- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6951+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6952 } put_user_catch(err);
6953
6954 if (err)
6955@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6956 0xb8,
6957 __NR_ia32_rt_sigreturn,
6958 0x80cd,
6959- 0,
6960+ 0
6961 };
6962
6963 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6964@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6965
6966 if (ka->sa.sa_flags & SA_RESTORER)
6967 restorer = ka->sa.sa_restorer;
6968+ else if (current->mm->context.vdso)
6969+ /* Return stub is in 32bit vsyscall page */
6970+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6971 else
6972- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6973- rt_sigreturn);
6974+ restorer = &frame->retcode;
6975 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6976
6977 /*
6978 * Not actually used anymore, but left because some gdb
6979 * versions need it.
6980 */
6981- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6982+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6983 } put_user_catch(err);
6984
6985 if (err)
6986diff -urNp linux-2.6.32.46/arch/x86/include/asm/alternative.h linux-2.6.32.46/arch/x86/include/asm/alternative.h
6987--- linux-2.6.32.46/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6988+++ linux-2.6.32.46/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6989@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6990 " .byte 662b-661b\n" /* sourcelen */ \
6991 " .byte 664f-663f\n" /* replacementlen */ \
6992 ".previous\n" \
6993- ".section .altinstr_replacement, \"ax\"\n" \
6994+ ".section .altinstr_replacement, \"a\"\n" \
6995 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6996 ".previous"
6997
6998diff -urNp linux-2.6.32.46/arch/x86/include/asm/apic.h linux-2.6.32.46/arch/x86/include/asm/apic.h
6999--- linux-2.6.32.46/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
7000+++ linux-2.6.32.46/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
7001@@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7002
7003 #ifdef CONFIG_X86_LOCAL_APIC
7004
7005-extern unsigned int apic_verbosity;
7006+extern int apic_verbosity;
7007 extern int local_apic_timer_c2_ok;
7008
7009 extern int disable_apic;
7010diff -urNp linux-2.6.32.46/arch/x86/include/asm/apm.h linux-2.6.32.46/arch/x86/include/asm/apm.h
7011--- linux-2.6.32.46/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
7012+++ linux-2.6.32.46/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
7013@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7014 __asm__ __volatile__(APM_DO_ZERO_SEGS
7015 "pushl %%edi\n\t"
7016 "pushl %%ebp\n\t"
7017- "lcall *%%cs:apm_bios_entry\n\t"
7018+ "lcall *%%ss:apm_bios_entry\n\t"
7019 "setc %%al\n\t"
7020 "popl %%ebp\n\t"
7021 "popl %%edi\n\t"
7022@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7023 __asm__ __volatile__(APM_DO_ZERO_SEGS
7024 "pushl %%edi\n\t"
7025 "pushl %%ebp\n\t"
7026- "lcall *%%cs:apm_bios_entry\n\t"
7027+ "lcall *%%ss:apm_bios_entry\n\t"
7028 "setc %%bl\n\t"
7029 "popl %%ebp\n\t"
7030 "popl %%edi\n\t"
7031diff -urNp linux-2.6.32.46/arch/x86/include/asm/atomic_32.h linux-2.6.32.46/arch/x86/include/asm/atomic_32.h
7032--- linux-2.6.32.46/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
7033+++ linux-2.6.32.46/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
7034@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7035 }
7036
7037 /**
7038+ * atomic_read_unchecked - read atomic variable
7039+ * @v: pointer of type atomic_unchecked_t
7040+ *
7041+ * Atomically reads the value of @v.
7042+ */
7043+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7044+{
7045+ return v->counter;
7046+}
7047+
7048+/**
7049 * atomic_set - set atomic variable
7050 * @v: pointer of type atomic_t
7051 * @i: required value
7052@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7053 }
7054
7055 /**
7056+ * atomic_set_unchecked - set atomic variable
7057+ * @v: pointer of type atomic_unchecked_t
7058+ * @i: required value
7059+ *
7060+ * Atomically sets the value of @v to @i.
7061+ */
7062+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7063+{
7064+ v->counter = i;
7065+}
7066+
7067+/**
7068 * atomic_add - add integer to atomic variable
7069 * @i: integer value to add
7070 * @v: pointer of type atomic_t
7071@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7072 */
7073 static inline void atomic_add(int i, atomic_t *v)
7074 {
7075- asm volatile(LOCK_PREFIX "addl %1,%0"
7076+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7077+
7078+#ifdef CONFIG_PAX_REFCOUNT
7079+ "jno 0f\n"
7080+ LOCK_PREFIX "subl %1,%0\n"
7081+ "int $4\n0:\n"
7082+ _ASM_EXTABLE(0b, 0b)
7083+#endif
7084+
7085+ : "+m" (v->counter)
7086+ : "ir" (i));
7087+}
7088+
7089+/**
7090+ * atomic_add_unchecked - add integer to atomic variable
7091+ * @i: integer value to add
7092+ * @v: pointer of type atomic_unchecked_t
7093+ *
7094+ * Atomically adds @i to @v.
7095+ */
7096+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7097+{
7098+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7099 : "+m" (v->counter)
7100 : "ir" (i));
7101 }
7102@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7103 */
7104 static inline void atomic_sub(int i, atomic_t *v)
7105 {
7106- asm volatile(LOCK_PREFIX "subl %1,%0"
7107+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7108+
7109+#ifdef CONFIG_PAX_REFCOUNT
7110+ "jno 0f\n"
7111+ LOCK_PREFIX "addl %1,%0\n"
7112+ "int $4\n0:\n"
7113+ _ASM_EXTABLE(0b, 0b)
7114+#endif
7115+
7116+ : "+m" (v->counter)
7117+ : "ir" (i));
7118+}
7119+
7120+/**
7121+ * atomic_sub_unchecked - subtract integer from atomic variable
7122+ * @i: integer value to subtract
7123+ * @v: pointer of type atomic_unchecked_t
7124+ *
7125+ * Atomically subtracts @i from @v.
7126+ */
7127+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7128+{
7129+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7130 : "+m" (v->counter)
7131 : "ir" (i));
7132 }
7133@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7134 {
7135 unsigned char c;
7136
7137- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7138+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7139+
7140+#ifdef CONFIG_PAX_REFCOUNT
7141+ "jno 0f\n"
7142+ LOCK_PREFIX "addl %2,%0\n"
7143+ "int $4\n0:\n"
7144+ _ASM_EXTABLE(0b, 0b)
7145+#endif
7146+
7147+ "sete %1\n"
7148 : "+m" (v->counter), "=qm" (c)
7149 : "ir" (i) : "memory");
7150 return c;
7151@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7152 */
7153 static inline void atomic_inc(atomic_t *v)
7154 {
7155- asm volatile(LOCK_PREFIX "incl %0"
7156+ asm volatile(LOCK_PREFIX "incl %0\n"
7157+
7158+#ifdef CONFIG_PAX_REFCOUNT
7159+ "jno 0f\n"
7160+ LOCK_PREFIX "decl %0\n"
7161+ "int $4\n0:\n"
7162+ _ASM_EXTABLE(0b, 0b)
7163+#endif
7164+
7165+ : "+m" (v->counter));
7166+}
7167+
7168+/**
7169+ * atomic_inc_unchecked - increment atomic variable
7170+ * @v: pointer of type atomic_unchecked_t
7171+ *
7172+ * Atomically increments @v by 1.
7173+ */
7174+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7175+{
7176+ asm volatile(LOCK_PREFIX "incl %0\n"
7177 : "+m" (v->counter));
7178 }
7179
7180@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7181 */
7182 static inline void atomic_dec(atomic_t *v)
7183 {
7184- asm volatile(LOCK_PREFIX "decl %0"
7185+ asm volatile(LOCK_PREFIX "decl %0\n"
7186+
7187+#ifdef CONFIG_PAX_REFCOUNT
7188+ "jno 0f\n"
7189+ LOCK_PREFIX "incl %0\n"
7190+ "int $4\n0:\n"
7191+ _ASM_EXTABLE(0b, 0b)
7192+#endif
7193+
7194+ : "+m" (v->counter));
7195+}
7196+
7197+/**
7198+ * atomic_dec_unchecked - decrement atomic variable
7199+ * @v: pointer of type atomic_unchecked_t
7200+ *
7201+ * Atomically decrements @v by 1.
7202+ */
7203+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7204+{
7205+ asm volatile(LOCK_PREFIX "decl %0\n"
7206 : "+m" (v->counter));
7207 }
7208
7209@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7210 {
7211 unsigned char c;
7212
7213- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7214+ asm volatile(LOCK_PREFIX "decl %0\n"
7215+
7216+#ifdef CONFIG_PAX_REFCOUNT
7217+ "jno 0f\n"
7218+ LOCK_PREFIX "incl %0\n"
7219+ "int $4\n0:\n"
7220+ _ASM_EXTABLE(0b, 0b)
7221+#endif
7222+
7223+ "sete %1\n"
7224 : "+m" (v->counter), "=qm" (c)
7225 : : "memory");
7226 return c != 0;
7227@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7228 {
7229 unsigned char c;
7230
7231- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7232+ asm volatile(LOCK_PREFIX "incl %0\n"
7233+
7234+#ifdef CONFIG_PAX_REFCOUNT
7235+ "jno 0f\n"
7236+ LOCK_PREFIX "decl %0\n"
7237+ "into\n0:\n"
7238+ _ASM_EXTABLE(0b, 0b)
7239+#endif
7240+
7241+ "sete %1\n"
7242+ : "+m" (v->counter), "=qm" (c)
7243+ : : "memory");
7244+ return c != 0;
7245+}
7246+
7247+/**
7248+ * atomic_inc_and_test_unchecked - increment and test
7249+ * @v: pointer of type atomic_unchecked_t
7250+ *
7251+ * Atomically increments @v by 1
7252+ * and returns true if the result is zero, or false for all
7253+ * other cases.
7254+ */
7255+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7256+{
7257+ unsigned char c;
7258+
7259+ asm volatile(LOCK_PREFIX "incl %0\n"
7260+ "sete %1\n"
7261 : "+m" (v->counter), "=qm" (c)
7262 : : "memory");
7263 return c != 0;
7264@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7265 {
7266 unsigned char c;
7267
7268- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7269+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7270+
7271+#ifdef CONFIG_PAX_REFCOUNT
7272+ "jno 0f\n"
7273+ LOCK_PREFIX "subl %2,%0\n"
7274+ "int $4\n0:\n"
7275+ _ASM_EXTABLE(0b, 0b)
7276+#endif
7277+
7278+ "sets %1\n"
7279 : "+m" (v->counter), "=qm" (c)
7280 : "ir" (i) : "memory");
7281 return c;
7282@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7283 #endif
7284 /* Modern 486+ processor */
7285 __i = i;
7286+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7287+
7288+#ifdef CONFIG_PAX_REFCOUNT
7289+ "jno 0f\n"
7290+ "movl %0, %1\n"
7291+ "int $4\n0:\n"
7292+ _ASM_EXTABLE(0b, 0b)
7293+#endif
7294+
7295+ : "+r" (i), "+m" (v->counter)
7296+ : : "memory");
7297+ return i + __i;
7298+
7299+#ifdef CONFIG_M386
7300+no_xadd: /* Legacy 386 processor */
7301+ local_irq_save(flags);
7302+ __i = atomic_read(v);
7303+ atomic_set(v, i + __i);
7304+ local_irq_restore(flags);
7305+ return i + __i;
7306+#endif
7307+}
7308+
7309+/**
7310+ * atomic_add_return_unchecked - add integer and return
7311+ * @v: pointer of type atomic_unchecked_t
7312+ * @i: integer value to add
7313+ *
7314+ * Atomically adds @i to @v and returns @i + @v
7315+ */
7316+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7317+{
7318+ int __i;
7319+#ifdef CONFIG_M386
7320+ unsigned long flags;
7321+ if (unlikely(boot_cpu_data.x86 <= 3))
7322+ goto no_xadd;
7323+#endif
7324+ /* Modern 486+ processor */
7325+ __i = i;
7326 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7327 : "+r" (i), "+m" (v->counter)
7328 : : "memory");
7329@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7330 return cmpxchg(&v->counter, old, new);
7331 }
7332
7333+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7334+{
7335+ return cmpxchg(&v->counter, old, new);
7336+}
7337+
7338 static inline int atomic_xchg(atomic_t *v, int new)
7339 {
7340 return xchg(&v->counter, new);
7341 }
7342
7343+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7344+{
7345+ return xchg(&v->counter, new);
7346+}
7347+
7348 /**
7349 * atomic_add_unless - add unless the number is already a given value
7350 * @v: pointer of type atomic_t
7351@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7352 */
7353 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7354 {
7355- int c, old;
7356+ int c, old, new;
7357 c = atomic_read(v);
7358 for (;;) {
7359- if (unlikely(c == (u)))
7360+ if (unlikely(c == u))
7361 break;
7362- old = atomic_cmpxchg((v), c, c + (a));
7363+
7364+ asm volatile("addl %2,%0\n"
7365+
7366+#ifdef CONFIG_PAX_REFCOUNT
7367+ "jno 0f\n"
7368+ "subl %2,%0\n"
7369+ "int $4\n0:\n"
7370+ _ASM_EXTABLE(0b, 0b)
7371+#endif
7372+
7373+ : "=r" (new)
7374+ : "0" (c), "ir" (a));
7375+
7376+ old = atomic_cmpxchg(v, c, new);
7377 if (likely(old == c))
7378 break;
7379 c = old;
7380 }
7381- return c != (u);
7382+ return c != u;
7383 }
7384
7385 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7386
7387 #define atomic_inc_return(v) (atomic_add_return(1, v))
7388+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7389+{
7390+ return atomic_add_return_unchecked(1, v);
7391+}
7392 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7393
7394 /* These are x86-specific, used by some header files */
7395@@ -266,9 +495,18 @@ typedef struct {
7396 u64 __aligned(8) counter;
7397 } atomic64_t;
7398
7399+#ifdef CONFIG_PAX_REFCOUNT
7400+typedef struct {
7401+ u64 __aligned(8) counter;
7402+} atomic64_unchecked_t;
7403+#else
7404+typedef atomic64_t atomic64_unchecked_t;
7405+#endif
7406+
7407 #define ATOMIC64_INIT(val) { (val) }
7408
7409 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7410+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7411
7412 /**
7413 * atomic64_xchg - xchg atomic64 variable
7414@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7415 * the old value.
7416 */
7417 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7418+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7419
7420 /**
7421 * atomic64_set - set atomic64 variable
7422@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7423 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7424
7425 /**
7426+ * atomic64_unchecked_set - set atomic64 variable
7427+ * @ptr: pointer to type atomic64_unchecked_t
7428+ * @new_val: value to assign
7429+ *
7430+ * Atomically sets the value of @ptr to @new_val.
7431+ */
7432+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7433+
7434+/**
7435 * atomic64_read - read atomic64 variable
7436 * @ptr: pointer to type atomic64_t
7437 *
7438@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7439 return res;
7440 }
7441
7442-extern u64 atomic64_read(atomic64_t *ptr);
7443+/**
7444+ * atomic64_read_unchecked - read atomic64 variable
7445+ * @ptr: pointer to type atomic64_unchecked_t
7446+ *
7447+ * Atomically reads the value of @ptr and returns it.
7448+ */
7449+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7450+{
7451+ u64 res;
7452+
7453+ /*
7454+ * Note, we inline this atomic64_unchecked_t primitive because
7455+ * it only clobbers EAX/EDX and leaves the others
7456+ * untouched. We also (somewhat subtly) rely on the
7457+ * fact that cmpxchg8b returns the current 64-bit value
7458+ * of the memory location we are touching:
7459+ */
7460+ asm volatile(
7461+ "mov %%ebx, %%eax\n\t"
7462+ "mov %%ecx, %%edx\n\t"
7463+ LOCK_PREFIX "cmpxchg8b %1\n"
7464+ : "=&A" (res)
7465+ : "m" (*ptr)
7466+ );
7467+
7468+ return res;
7469+}
7470
7471 /**
7472 * atomic64_add_return - add and return
7473@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7474 * Other variants with different arithmetic operators:
7475 */
7476 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7477+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7478 extern u64 atomic64_inc_return(atomic64_t *ptr);
7479+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7480 extern u64 atomic64_dec_return(atomic64_t *ptr);
7481+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7482
7483 /**
7484 * atomic64_add - add integer to atomic64 variable
7485@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7486 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7487
7488 /**
7489+ * atomic64_add_unchecked - add integer to atomic64 variable
7490+ * @delta: integer value to add
7491+ * @ptr: pointer to type atomic64_unchecked_t
7492+ *
7493+ * Atomically adds @delta to @ptr.
7494+ */
7495+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7496+
7497+/**
7498 * atomic64_sub - subtract the atomic64 variable
7499 * @delta: integer value to subtract
7500 * @ptr: pointer to type atomic64_t
7501@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7502 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7503
7504 /**
7505+ * atomic64_sub_unchecked - subtract the atomic64 variable
7506+ * @delta: integer value to subtract
7507+ * @ptr: pointer to type atomic64_unchecked_t
7508+ *
7509+ * Atomically subtracts @delta from @ptr.
7510+ */
7511+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7512+
7513+/**
7514 * atomic64_sub_and_test - subtract value from variable and test result
7515 * @delta: integer value to subtract
7516 * @ptr: pointer to type atomic64_t
7517@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7518 extern void atomic64_inc(atomic64_t *ptr);
7519
7520 /**
7521+ * atomic64_inc_unchecked - increment atomic64 variable
7522+ * @ptr: pointer to type atomic64_unchecked_t
7523+ *
7524+ * Atomically increments @ptr by 1.
7525+ */
7526+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7527+
7528+/**
7529 * atomic64_dec - decrement atomic64 variable
7530 * @ptr: pointer to type atomic64_t
7531 *
7532@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7533 extern void atomic64_dec(atomic64_t *ptr);
7534
7535 /**
7536+ * atomic64_dec_unchecked - decrement atomic64 variable
7537+ * @ptr: pointer to type atomic64_unchecked_t
7538+ *
7539+ * Atomically decrements @ptr by 1.
7540+ */
7541+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7542+
7543+/**
7544 * atomic64_dec_and_test - decrement and test
7545 * @ptr: pointer to type atomic64_t
7546 *
7547diff -urNp linux-2.6.32.46/arch/x86/include/asm/atomic_64.h linux-2.6.32.46/arch/x86/include/asm/atomic_64.h
7548--- linux-2.6.32.46/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7549+++ linux-2.6.32.46/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7550@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7551 }
7552
7553 /**
7554+ * atomic_read_unchecked - read atomic variable
7555+ * @v: pointer of type atomic_unchecked_t
7556+ *
7557+ * Atomically reads the value of @v.
7558+ */
7559+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7560+{
7561+ return v->counter;
7562+}
7563+
7564+/**
7565 * atomic_set - set atomic variable
7566 * @v: pointer of type atomic_t
7567 * @i: required value
7568@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7569 }
7570
7571 /**
7572+ * atomic_set_unchecked - set atomic variable
7573+ * @v: pointer of type atomic_unchecked_t
7574+ * @i: required value
7575+ *
7576+ * Atomically sets the value of @v to @i.
7577+ */
7578+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7579+{
7580+ v->counter = i;
7581+}
7582+
7583+/**
7584 * atomic_add - add integer to atomic variable
7585 * @i: integer value to add
7586 * @v: pointer of type atomic_t
7587@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7588 */
7589 static inline void atomic_add(int i, atomic_t *v)
7590 {
7591- asm volatile(LOCK_PREFIX "addl %1,%0"
7592+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7593+
7594+#ifdef CONFIG_PAX_REFCOUNT
7595+ "jno 0f\n"
7596+ LOCK_PREFIX "subl %1,%0\n"
7597+ "int $4\n0:\n"
7598+ _ASM_EXTABLE(0b, 0b)
7599+#endif
7600+
7601+ : "=m" (v->counter)
7602+ : "ir" (i), "m" (v->counter));
7603+}
7604+
7605+/**
7606+ * atomic_add_unchecked - add integer to atomic variable
7607+ * @i: integer value to add
7608+ * @v: pointer of type atomic_unchecked_t
7609+ *
7610+ * Atomically adds @i to @v.
7611+ */
7612+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7613+{
7614+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7615 : "=m" (v->counter)
7616 : "ir" (i), "m" (v->counter));
7617 }
7618@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7619 */
7620 static inline void atomic_sub(int i, atomic_t *v)
7621 {
7622- asm volatile(LOCK_PREFIX "subl %1,%0"
7623+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7624+
7625+#ifdef CONFIG_PAX_REFCOUNT
7626+ "jno 0f\n"
7627+ LOCK_PREFIX "addl %1,%0\n"
7628+ "int $4\n0:\n"
7629+ _ASM_EXTABLE(0b, 0b)
7630+#endif
7631+
7632+ : "=m" (v->counter)
7633+ : "ir" (i), "m" (v->counter));
7634+}
7635+
7636+/**
7637+ * atomic_sub_unchecked - subtract the atomic variable
7638+ * @i: integer value to subtract
7639+ * @v: pointer of type atomic_unchecked_t
7640+ *
7641+ * Atomically subtracts @i from @v.
7642+ */
7643+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7644+{
7645+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7646 : "=m" (v->counter)
7647 : "ir" (i), "m" (v->counter));
7648 }
7649@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7650 {
7651 unsigned char c;
7652
7653- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7654+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7655+
7656+#ifdef CONFIG_PAX_REFCOUNT
7657+ "jno 0f\n"
7658+ LOCK_PREFIX "addl %2,%0\n"
7659+ "int $4\n0:\n"
7660+ _ASM_EXTABLE(0b, 0b)
7661+#endif
7662+
7663+ "sete %1\n"
7664 : "=m" (v->counter), "=qm" (c)
7665 : "ir" (i), "m" (v->counter) : "memory");
7666 return c;
7667@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7668 */
7669 static inline void atomic_inc(atomic_t *v)
7670 {
7671- asm volatile(LOCK_PREFIX "incl %0"
7672+ asm volatile(LOCK_PREFIX "incl %0\n"
7673+
7674+#ifdef CONFIG_PAX_REFCOUNT
7675+ "jno 0f\n"
7676+ LOCK_PREFIX "decl %0\n"
7677+ "int $4\n0:\n"
7678+ _ASM_EXTABLE(0b, 0b)
7679+#endif
7680+
7681+ : "=m" (v->counter)
7682+ : "m" (v->counter));
7683+}
7684+
7685+/**
7686+ * atomic_inc_unchecked - increment atomic variable
7687+ * @v: pointer of type atomic_unchecked_t
7688+ *
7689+ * Atomically increments @v by 1.
7690+ */
7691+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7692+{
7693+ asm volatile(LOCK_PREFIX "incl %0\n"
7694 : "=m" (v->counter)
7695 : "m" (v->counter));
7696 }
7697@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7698 */
7699 static inline void atomic_dec(atomic_t *v)
7700 {
7701- asm volatile(LOCK_PREFIX "decl %0"
7702+ asm volatile(LOCK_PREFIX "decl %0\n"
7703+
7704+#ifdef CONFIG_PAX_REFCOUNT
7705+ "jno 0f\n"
7706+ LOCK_PREFIX "incl %0\n"
7707+ "int $4\n0:\n"
7708+ _ASM_EXTABLE(0b, 0b)
7709+#endif
7710+
7711+ : "=m" (v->counter)
7712+ : "m" (v->counter));
7713+}
7714+
7715+/**
7716+ * atomic_dec_unchecked - decrement atomic variable
7717+ * @v: pointer of type atomic_unchecked_t
7718+ *
7719+ * Atomically decrements @v by 1.
7720+ */
7721+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7722+{
7723+ asm volatile(LOCK_PREFIX "decl %0\n"
7724 : "=m" (v->counter)
7725 : "m" (v->counter));
7726 }
7727@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7728 {
7729 unsigned char c;
7730
7731- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7732+ asm volatile(LOCK_PREFIX "decl %0\n"
7733+
7734+#ifdef CONFIG_PAX_REFCOUNT
7735+ "jno 0f\n"
7736+ LOCK_PREFIX "incl %0\n"
7737+ "int $4\n0:\n"
7738+ _ASM_EXTABLE(0b, 0b)
7739+#endif
7740+
7741+ "sete %1\n"
7742 : "=m" (v->counter), "=qm" (c)
7743 : "m" (v->counter) : "memory");
7744 return c != 0;
7745@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7746 {
7747 unsigned char c;
7748
7749- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7750+ asm volatile(LOCK_PREFIX "incl %0\n"
7751+
7752+#ifdef CONFIG_PAX_REFCOUNT
7753+ "jno 0f\n"
7754+ LOCK_PREFIX "decl %0\n"
7755+ "int $4\n0:\n"
7756+ _ASM_EXTABLE(0b, 0b)
7757+#endif
7758+
7759+ "sete %1\n"
7760+ : "=m" (v->counter), "=qm" (c)
7761+ : "m" (v->counter) : "memory");
7762+ return c != 0;
7763+}
7764+
7765+/**
7766+ * atomic_inc_and_test_unchecked - increment and test
7767+ * @v: pointer of type atomic_unchecked_t
7768+ *
7769+ * Atomically increments @v by 1
7770+ * and returns true if the result is zero, or false for all
7771+ * other cases.
7772+ */
7773+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7774+{
7775+ unsigned char c;
7776+
7777+ asm volatile(LOCK_PREFIX "incl %0\n"
7778+ "sete %1\n"
7779 : "=m" (v->counter), "=qm" (c)
7780 : "m" (v->counter) : "memory");
7781 return c != 0;
7782@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7783 {
7784 unsigned char c;
7785
7786- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7787+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7788+
7789+#ifdef CONFIG_PAX_REFCOUNT
7790+ "jno 0f\n"
7791+ LOCK_PREFIX "subl %2,%0\n"
7792+ "int $4\n0:\n"
7793+ _ASM_EXTABLE(0b, 0b)
7794+#endif
7795+
7796+ "sets %1\n"
7797 : "=m" (v->counter), "=qm" (c)
7798 : "ir" (i), "m" (v->counter) : "memory");
7799 return c;
7800@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7801 static inline int atomic_add_return(int i, atomic_t *v)
7802 {
7803 int __i = i;
7804- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7805+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7806+
7807+#ifdef CONFIG_PAX_REFCOUNT
7808+ "jno 0f\n"
7809+ "movl %0, %1\n"
7810+ "int $4\n0:\n"
7811+ _ASM_EXTABLE(0b, 0b)
7812+#endif
7813+
7814+ : "+r" (i), "+m" (v->counter)
7815+ : : "memory");
7816+ return i + __i;
7817+}
7818+
7819+/**
7820+ * atomic_add_return_unchecked - add and return
7821+ * @i: integer value to add
7822+ * @v: pointer of type atomic_unchecked_t
7823+ *
7824+ * Atomically adds @i to @v and returns @i + @v
7825+ */
7826+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7827+{
7828+ int __i = i;
7829+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7830 : "+r" (i), "+m" (v->counter)
7831 : : "memory");
7832 return i + __i;
7833@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7834 }
7835
7836 #define atomic_inc_return(v) (atomic_add_return(1, v))
7837+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7838+{
7839+ return atomic_add_return_unchecked(1, v);
7840+}
7841 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7842
7843 /* The 64-bit atomic type */
7844@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7845 }
7846
7847 /**
7848+ * atomic64_read_unchecked - read atomic64 variable
7849+ * @v: pointer of type atomic64_unchecked_t
7850+ *
7851+ * Atomically reads the value of @v.
7852+ * Doesn't imply a read memory barrier.
7853+ */
7854+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7855+{
7856+ return v->counter;
7857+}
7858+
7859+/**
7860 * atomic64_set - set atomic64 variable
7861 * @v: pointer to type atomic64_t
7862 * @i: required value
7863@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7864 }
7865
7866 /**
7867+ * atomic64_set_unchecked - set atomic64 variable
7868+ * @v: pointer to type atomic64_unchecked_t
7869+ * @i: required value
7870+ *
7871+ * Atomically sets the value of @v to @i.
7872+ */
7873+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7874+{
7875+ v->counter = i;
7876+}
7877+
7878+/**
7879 * atomic64_add - add integer to atomic64 variable
7880 * @i: integer value to add
7881 * @v: pointer to type atomic64_t
7882@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7883 */
7884 static inline void atomic64_add(long i, atomic64_t *v)
7885 {
7886+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7887+
7888+#ifdef CONFIG_PAX_REFCOUNT
7889+ "jno 0f\n"
7890+ LOCK_PREFIX "subq %1,%0\n"
7891+ "int $4\n0:\n"
7892+ _ASM_EXTABLE(0b, 0b)
7893+#endif
7894+
7895+ : "=m" (v->counter)
7896+ : "er" (i), "m" (v->counter));
7897+}
7898+
7899+/**
7900+ * atomic64_add_unchecked - add integer to atomic64 variable
7901+ * @i: integer value to add
7902+ * @v: pointer to type atomic64_unchecked_t
7903+ *
7904+ * Atomically adds @i to @v.
7905+ */
7906+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7907+{
7908 asm volatile(LOCK_PREFIX "addq %1,%0"
7909 : "=m" (v->counter)
7910 : "er" (i), "m" (v->counter));
7911@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7912 */
7913 static inline void atomic64_sub(long i, atomic64_t *v)
7914 {
7915- asm volatile(LOCK_PREFIX "subq %1,%0"
7916+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7917+
7918+#ifdef CONFIG_PAX_REFCOUNT
7919+ "jno 0f\n"
7920+ LOCK_PREFIX "addq %1,%0\n"
7921+ "int $4\n0:\n"
7922+ _ASM_EXTABLE(0b, 0b)
7923+#endif
7924+
7925 : "=m" (v->counter)
7926 : "er" (i), "m" (v->counter));
7927 }
7928@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7929 {
7930 unsigned char c;
7931
7932- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7933+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7934+
7935+#ifdef CONFIG_PAX_REFCOUNT
7936+ "jno 0f\n"
7937+ LOCK_PREFIX "addq %2,%0\n"
7938+ "int $4\n0:\n"
7939+ _ASM_EXTABLE(0b, 0b)
7940+#endif
7941+
7942+ "sete %1\n"
7943 : "=m" (v->counter), "=qm" (c)
7944 : "er" (i), "m" (v->counter) : "memory");
7945 return c;
7946@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7947 */
7948 static inline void atomic64_inc(atomic64_t *v)
7949 {
7950+ asm volatile(LOCK_PREFIX "incq %0\n"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ "jno 0f\n"
7954+ LOCK_PREFIX "decq %0\n"
7955+ "int $4\n0:\n"
7956+ _ASM_EXTABLE(0b, 0b)
7957+#endif
7958+
7959+ : "=m" (v->counter)
7960+ : "m" (v->counter));
7961+}
7962+
7963+/**
7964+ * atomic64_inc_unchecked - increment atomic64 variable
7965+ * @v: pointer to type atomic64_unchecked_t
7966+ *
7967+ * Atomically increments @v by 1.
7968+ */
7969+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7970+{
7971 asm volatile(LOCK_PREFIX "incq %0"
7972 : "=m" (v->counter)
7973 : "m" (v->counter));
7974@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7975 */
7976 static inline void atomic64_dec(atomic64_t *v)
7977 {
7978- asm volatile(LOCK_PREFIX "decq %0"
7979+ asm volatile(LOCK_PREFIX "decq %0\n"
7980+
7981+#ifdef CONFIG_PAX_REFCOUNT
7982+ "jno 0f\n"
7983+ LOCK_PREFIX "incq %0\n"
7984+ "int $4\n0:\n"
7985+ _ASM_EXTABLE(0b, 0b)
7986+#endif
7987+
7988+ : "=m" (v->counter)
7989+ : "m" (v->counter));
7990+}
7991+
7992+/**
7993+ * atomic64_dec_unchecked - decrement atomic64 variable
7994+ * @v: pointer to type atomic64_unchecked_t
7995+ *
7996+ * Atomically decrements @v by 1.
7997+ */
7998+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7999+{
8000+ asm volatile(LOCK_PREFIX "decq %0\n"
8001 : "=m" (v->counter)
8002 : "m" (v->counter));
8003 }
8004@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8005 {
8006 unsigned char c;
8007
8008- asm volatile(LOCK_PREFIX "decq %0; sete %1"
8009+ asm volatile(LOCK_PREFIX "decq %0\n"
8010+
8011+#ifdef CONFIG_PAX_REFCOUNT
8012+ "jno 0f\n"
8013+ LOCK_PREFIX "incq %0\n"
8014+ "int $4\n0:\n"
8015+ _ASM_EXTABLE(0b, 0b)
8016+#endif
8017+
8018+ "sete %1\n"
8019 : "=m" (v->counter), "=qm" (c)
8020 : "m" (v->counter) : "memory");
8021 return c != 0;
8022@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8023 {
8024 unsigned char c;
8025
8026- asm volatile(LOCK_PREFIX "incq %0; sete %1"
8027+ asm volatile(LOCK_PREFIX "incq %0\n"
8028+
8029+#ifdef CONFIG_PAX_REFCOUNT
8030+ "jno 0f\n"
8031+ LOCK_PREFIX "decq %0\n"
8032+ "int $4\n0:\n"
8033+ _ASM_EXTABLE(0b, 0b)
8034+#endif
8035+
8036+ "sete %1\n"
8037 : "=m" (v->counter), "=qm" (c)
8038 : "m" (v->counter) : "memory");
8039 return c != 0;
8040@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8041 {
8042 unsigned char c;
8043
8044- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8045+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
8046+
8047+#ifdef CONFIG_PAX_REFCOUNT
8048+ "jno 0f\n"
8049+ LOCK_PREFIX "subq %2,%0\n"
8050+ "int $4\n0:\n"
8051+ _ASM_EXTABLE(0b, 0b)
8052+#endif
8053+
8054+ "sets %1\n"
8055 : "=m" (v->counter), "=qm" (c)
8056 : "er" (i), "m" (v->counter) : "memory");
8057 return c;
8058@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8059 static inline long atomic64_add_return(long i, atomic64_t *v)
8060 {
8061 long __i = i;
8062- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8063+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8064+
8065+#ifdef CONFIG_PAX_REFCOUNT
8066+ "jno 0f\n"
8067+ "movq %0, %1\n"
8068+ "int $4\n0:\n"
8069+ _ASM_EXTABLE(0b, 0b)
8070+#endif
8071+
8072+ : "+r" (i), "+m" (v->counter)
8073+ : : "memory");
8074+ return i + __i;
8075+}
8076+
8077+/**
8078+ * atomic64_add_return_unchecked - add and return
8079+ * @i: integer value to add
8080+ * @v: pointer to type atomic64_unchecked_t
8081+ *
8082+ * Atomically adds @i to @v and returns @i + @v
8083+ */
8084+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8085+{
8086+ long __i = i;
8087+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
8088 : "+r" (i), "+m" (v->counter)
8089 : : "memory");
8090 return i + __i;
8091@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8092 }
8093
8094 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8095+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8096+{
8097+ return atomic64_add_return_unchecked(1, v);
8098+}
8099 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8100
8101 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8102@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8103 return cmpxchg(&v->counter, old, new);
8104 }
8105
8106+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8107+{
8108+ return cmpxchg(&v->counter, old, new);
8109+}
8110+
8111 static inline long atomic64_xchg(atomic64_t *v, long new)
8112 {
8113 return xchg(&v->counter, new);
8114 }
8115
8116+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8117+{
8118+ return xchg(&v->counter, new);
8119+}
8120+
8121 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8122 {
8123 return cmpxchg(&v->counter, old, new);
8124 }
8125
8126+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8127+{
8128+ return cmpxchg(&v->counter, old, new);
8129+}
8130+
8131 static inline long atomic_xchg(atomic_t *v, int new)
8132 {
8133 return xchg(&v->counter, new);
8134 }
8135
8136+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8137+{
8138+ return xchg(&v->counter, new);
8139+}
8140+
8141 /**
8142 * atomic_add_unless - add unless the number is a given value
8143 * @v: pointer of type atomic_t
8144@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8145 */
8146 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8147 {
8148- int c, old;
8149+ int c, old, new;
8150 c = atomic_read(v);
8151 for (;;) {
8152- if (unlikely(c == (u)))
8153+ if (unlikely(c == u))
8154 break;
8155- old = atomic_cmpxchg((v), c, c + (a));
8156+
8157+ asm volatile("addl %2,%0\n"
8158+
8159+#ifdef CONFIG_PAX_REFCOUNT
8160+ "jno 0f\n"
8161+ "subl %2,%0\n"
8162+ "int $4\n0:\n"
8163+ _ASM_EXTABLE(0b, 0b)
8164+#endif
8165+
8166+ : "=r" (new)
8167+ : "0" (c), "ir" (a));
8168+
8169+ old = atomic_cmpxchg(v, c, new);
8170 if (likely(old == c))
8171 break;
8172 c = old;
8173 }
8174- return c != (u);
8175+ return c != u;
8176 }
8177
8178 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8179@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8180 */
8181 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8182 {
8183- long c, old;
8184+ long c, old, new;
8185 c = atomic64_read(v);
8186 for (;;) {
8187- if (unlikely(c == (u)))
8188+ if (unlikely(c == u))
8189 break;
8190- old = atomic64_cmpxchg((v), c, c + (a));
8191+
8192+ asm volatile("addq %2,%0\n"
8193+
8194+#ifdef CONFIG_PAX_REFCOUNT
8195+ "jno 0f\n"
8196+ "subq %2,%0\n"
8197+ "int $4\n0:\n"
8198+ _ASM_EXTABLE(0b, 0b)
8199+#endif
8200+
8201+ : "=r" (new)
8202+ : "0" (c), "er" (a));
8203+
8204+ old = atomic64_cmpxchg(v, c, new);
8205 if (likely(old == c))
8206 break;
8207 c = old;
8208 }
8209- return c != (u);
8210+ return c != u;
8211 }
8212
8213 /**
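The CONFIG_PAX_REFCOUNT hunks above all follow the same pattern: perform the atomic operation, branch with jno if the signed result did not overflow, and otherwise undo the operation and raise the overflow exception with int $4 (the _ASM_EXTABLE entry makes execution resume right after the trap). Below is a minimal userspace sketch of that idea, assuming GCC/Clang inline assembly on x86; the helper name is hypothetical and the kernel's trap is replaced by a return value so the snippet can run standalone — it is an illustration of the technique, not code from the patch.

/*
 * Illustrative sketch only -- not part of the patch.  Mirrors the
 * "op; jno; undo; int $4" sequence used by PAX_REFCOUNT, but reports
 * the overflow to the caller instead of trapping.
 */
#include <limits.h>
#include <stdio.h>

static int checked_inc(int *counter)
{
	unsigned char overflowed;

	asm volatile("lock incl %0\n\t"
		     "seto %1\n\t"	/* capture INC's overflow flag (setcc leaves flags intact) */
		     "jno 1f\n\t"	/* no signed overflow: done */
		     "lock decl %0\n"	/* overflow: roll the increment back */
		     "1:"
		     : "+m" (*counter), "=q" (overflowed)
		     : : "cc", "memory");
	return overflowed;
}

int main(void)
{
	int refcount = INT_MAX;	/* the next increment would wrap negative */

	if (checked_inc(&refcount))
		printf("overflow caught, counter held at %d\n", refcount);
	return 0;
}

In the non-overflowing common case the extra cost is a single not-taken branch, which is presumably why the same sequence is stamped into every atomic_*, atomic64_* and local_* operation instead of being factored out.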
8214diff -urNp linux-2.6.32.46/arch/x86/include/asm/bitops.h linux-2.6.32.46/arch/x86/include/asm/bitops.h
8215--- linux-2.6.32.46/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8216+++ linux-2.6.32.46/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8217@@ -38,7 +38,7 @@
8218 * a mask operation on a byte.
8219 */
8220 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8221-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8222+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8223 #define CONST_MASK(nr) (1 << ((nr) & 7))
8224
8225 /**
8226diff -urNp linux-2.6.32.46/arch/x86/include/asm/boot.h linux-2.6.32.46/arch/x86/include/asm/boot.h
8227--- linux-2.6.32.46/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8228+++ linux-2.6.32.46/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8229@@ -11,10 +11,15 @@
8230 #include <asm/pgtable_types.h>
8231
8232 /* Physical address where kernel should be loaded. */
8233-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8234+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8235 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8236 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8237
8238+#ifndef __ASSEMBLY__
8239+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8240+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8241+#endif
8242+
8243 /* Minimum kernel alignment, as a power of two */
8244 #ifdef CONFIG_X86_64
8245 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8246diff -urNp linux-2.6.32.46/arch/x86/include/asm/cacheflush.h linux-2.6.32.46/arch/x86/include/asm/cacheflush.h
8247--- linux-2.6.32.46/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8248+++ linux-2.6.32.46/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8249@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8250 static inline unsigned long get_page_memtype(struct page *pg)
8251 {
8252 if (!PageUncached(pg) && !PageWC(pg))
8253- return -1;
8254+ return ~0UL;
8255 else if (!PageUncached(pg) && PageWC(pg))
8256 return _PAGE_CACHE_WC;
8257 else if (PageUncached(pg) && !PageWC(pg))
8258@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8259 SetPageWC(pg);
8260 break;
8261 default:
8262- case -1:
8263+ case ~0UL:
8264 ClearPageUncached(pg);
8265 ClearPageWC(pg);
8266 break;
8267diff -urNp linux-2.6.32.46/arch/x86/include/asm/cache.h linux-2.6.32.46/arch/x86/include/asm/cache.h
8268--- linux-2.6.32.46/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8269+++ linux-2.6.32.46/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8270@@ -5,9 +5,10 @@
8271
8272 /* L1 cache line size */
8273 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8274-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8275+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8276
8277 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8278+#define __read_only __attribute__((__section__(".data.read_only")))
8279
8280 #ifdef CONFIG_X86_VSMP
8281 /* vSMP Internode cacheline shift */
8282diff -urNp linux-2.6.32.46/arch/x86/include/asm/checksum_32.h linux-2.6.32.46/arch/x86/include/asm/checksum_32.h
8283--- linux-2.6.32.46/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8284+++ linux-2.6.32.46/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8285@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8286 int len, __wsum sum,
8287 int *src_err_ptr, int *dst_err_ptr);
8288
8289+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8290+ int len, __wsum sum,
8291+ int *src_err_ptr, int *dst_err_ptr);
8292+
8293+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8294+ int len, __wsum sum,
8295+ int *src_err_ptr, int *dst_err_ptr);
8296+
8297 /*
8298 * Note: when you get a NULL pointer exception here this means someone
8299 * passed in an incorrect kernel address to one of these functions.
8300@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8301 int *err_ptr)
8302 {
8303 might_sleep();
8304- return csum_partial_copy_generic((__force void *)src, dst,
8305+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8306 len, sum, err_ptr, NULL);
8307 }
8308
8309@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8310 {
8311 might_sleep();
8312 if (access_ok(VERIFY_WRITE, dst, len))
8313- return csum_partial_copy_generic(src, (__force void *)dst,
8314+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8315 len, sum, NULL, err_ptr);
8316
8317 if (len)
8318diff -urNp linux-2.6.32.46/arch/x86/include/asm/desc_defs.h linux-2.6.32.46/arch/x86/include/asm/desc_defs.h
8319--- linux-2.6.32.46/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8320+++ linux-2.6.32.46/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8321@@ -31,6 +31,12 @@ struct desc_struct {
8322 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8323 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8324 };
8325+ struct {
8326+ u16 offset_low;
8327+ u16 seg;
8328+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8329+ unsigned offset_high: 16;
8330+ } gate;
8331 };
8332 } __attribute__((packed));
8333
8334diff -urNp linux-2.6.32.46/arch/x86/include/asm/desc.h linux-2.6.32.46/arch/x86/include/asm/desc.h
8335--- linux-2.6.32.46/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8336+++ linux-2.6.32.46/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8337@@ -4,6 +4,7 @@
8338 #include <asm/desc_defs.h>
8339 #include <asm/ldt.h>
8340 #include <asm/mmu.h>
8341+#include <asm/pgtable.h>
8342 #include <linux/smp.h>
8343
8344 static inline void fill_ldt(struct desc_struct *desc,
8345@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8346 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8347 desc->type = (info->read_exec_only ^ 1) << 1;
8348 desc->type |= info->contents << 2;
8349+ desc->type |= info->seg_not_present ^ 1;
8350 desc->s = 1;
8351 desc->dpl = 0x3;
8352 desc->p = info->seg_not_present ^ 1;
8353@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8354 }
8355
8356 extern struct desc_ptr idt_descr;
8357-extern gate_desc idt_table[];
8358-
8359-struct gdt_page {
8360- struct desc_struct gdt[GDT_ENTRIES];
8361-} __attribute__((aligned(PAGE_SIZE)));
8362-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8363+extern gate_desc idt_table[256];
8364
8365+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8366 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8367 {
8368- return per_cpu(gdt_page, cpu).gdt;
8369+ return cpu_gdt_table[cpu];
8370 }
8371
8372 #ifdef CONFIG_X86_64
8373@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8374 unsigned long base, unsigned dpl, unsigned flags,
8375 unsigned short seg)
8376 {
8377- gate->a = (seg << 16) | (base & 0xffff);
8378- gate->b = (base & 0xffff0000) |
8379- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8380+ gate->gate.offset_low = base;
8381+ gate->gate.seg = seg;
8382+ gate->gate.reserved = 0;
8383+ gate->gate.type = type;
8384+ gate->gate.s = 0;
8385+ gate->gate.dpl = dpl;
8386+ gate->gate.p = 1;
8387+ gate->gate.offset_high = base >> 16;
8388 }
8389
8390 #endif
8391@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8392 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8393 const gate_desc *gate)
8394 {
8395+ pax_open_kernel();
8396 memcpy(&idt[entry], gate, sizeof(*gate));
8397+ pax_close_kernel();
8398 }
8399
8400 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8401 const void *desc)
8402 {
8403+ pax_open_kernel();
8404 memcpy(&ldt[entry], desc, 8);
8405+ pax_close_kernel();
8406 }
8407
8408 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8409@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8410 size = sizeof(struct desc_struct);
8411 break;
8412 }
8413+
8414+ pax_open_kernel();
8415 memcpy(&gdt[entry], desc, size);
8416+ pax_close_kernel();
8417 }
8418
8419 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8420@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8421
8422 static inline void native_load_tr_desc(void)
8423 {
8424+ pax_open_kernel();
8425 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8426+ pax_close_kernel();
8427 }
8428
8429 static inline void native_load_gdt(const struct desc_ptr *dtr)
8430@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8431 unsigned int i;
8432 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8433
8434+ pax_open_kernel();
8435 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8436 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8437+ pax_close_kernel();
8438 }
8439
8440 #define _LDT_empty(info) \
8441@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8442 desc->limit = (limit >> 16) & 0xf;
8443 }
8444
8445-static inline void _set_gate(int gate, unsigned type, void *addr,
8446+static inline void _set_gate(int gate, unsigned type, const void *addr,
8447 unsigned dpl, unsigned ist, unsigned seg)
8448 {
8449 gate_desc s;
8450@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8451 * Pentium F0 0F bugfix can have resulted in the mapped
8452 * IDT being write-protected.
8453 */
8454-static inline void set_intr_gate(unsigned int n, void *addr)
8455+static inline void set_intr_gate(unsigned int n, const void *addr)
8456 {
8457 BUG_ON((unsigned)n > 0xFF);
8458 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8459@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8460 /*
8461 * This routine sets up an interrupt gate at directory privilege level 3.
8462 */
8463-static inline void set_system_intr_gate(unsigned int n, void *addr)
8464+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8465 {
8466 BUG_ON((unsigned)n > 0xFF);
8467 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8468 }
8469
8470-static inline void set_system_trap_gate(unsigned int n, void *addr)
8471+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8472 {
8473 BUG_ON((unsigned)n > 0xFF);
8474 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8475 }
8476
8477-static inline void set_trap_gate(unsigned int n, void *addr)
8478+static inline void set_trap_gate(unsigned int n, const void *addr)
8479 {
8480 BUG_ON((unsigned)n > 0xFF);
8481 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8482@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8483 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8484 {
8485 BUG_ON((unsigned)n > 0xFF);
8486- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8487+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8488 }
8489
8490-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8491+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8492 {
8493 BUG_ON((unsigned)n > 0xFF);
8494 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8495 }
8496
8497-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8498+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8499 {
8500 BUG_ON((unsigned)n > 0xFF);
8501 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8502 }
8503
8504+#ifdef CONFIG_X86_32
8505+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8506+{
8507+ struct desc_struct d;
8508+
8509+ if (likely(limit))
8510+ limit = (limit - 1UL) >> PAGE_SHIFT;
8511+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8512+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8513+}
8514+#endif
8515+
8516 #endif /* _ASM_X86_DESC_H */
8517diff -urNp linux-2.6.32.46/arch/x86/include/asm/device.h linux-2.6.32.46/arch/x86/include/asm/device.h
8518--- linux-2.6.32.46/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8519+++ linux-2.6.32.46/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8520@@ -6,7 +6,7 @@ struct dev_archdata {
8521 void *acpi_handle;
8522 #endif
8523 #ifdef CONFIG_X86_64
8524-struct dma_map_ops *dma_ops;
8525+ const struct dma_map_ops *dma_ops;
8526 #endif
8527 #ifdef CONFIG_DMAR
8528 void *iommu; /* hook for IOMMU specific extension */
8529diff -urNp linux-2.6.32.46/arch/x86/include/asm/dma-mapping.h linux-2.6.32.46/arch/x86/include/asm/dma-mapping.h
8530--- linux-2.6.32.46/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8531+++ linux-2.6.32.46/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8532@@ -25,9 +25,9 @@ extern int iommu_merge;
8533 extern struct device x86_dma_fallback_dev;
8534 extern int panic_on_overflow;
8535
8536-extern struct dma_map_ops *dma_ops;
8537+extern const struct dma_map_ops *dma_ops;
8538
8539-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8540+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8541 {
8542 #ifdef CONFIG_X86_32
8543 return dma_ops;
8544@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8545 /* Make sure we keep the same behaviour */
8546 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8547 {
8548- struct dma_map_ops *ops = get_dma_ops(dev);
8549+ const struct dma_map_ops *ops = get_dma_ops(dev);
8550 if (ops->mapping_error)
8551 return ops->mapping_error(dev, dma_addr);
8552
8553@@ -122,7 +122,7 @@ static inline void *
8554 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8555 gfp_t gfp)
8556 {
8557- struct dma_map_ops *ops = get_dma_ops(dev);
8558+ const struct dma_map_ops *ops = get_dma_ops(dev);
8559 void *memory;
8560
8561 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8562@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8563 static inline void dma_free_coherent(struct device *dev, size_t size,
8564 void *vaddr, dma_addr_t bus)
8565 {
8566- struct dma_map_ops *ops = get_dma_ops(dev);
8567+ const struct dma_map_ops *ops = get_dma_ops(dev);
8568
8569 WARN_ON(irqs_disabled()); /* for portability */
8570
8571diff -urNp linux-2.6.32.46/arch/x86/include/asm/e820.h linux-2.6.32.46/arch/x86/include/asm/e820.h
8572--- linux-2.6.32.46/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8573+++ linux-2.6.32.46/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8574@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8575 #define ISA_END_ADDRESS 0x100000
8576 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8577
8578-#define BIOS_BEGIN 0x000a0000
8579+#define BIOS_BEGIN 0x000c0000
8580 #define BIOS_END 0x00100000
8581
8582 #ifdef __KERNEL__
8583diff -urNp linux-2.6.32.46/arch/x86/include/asm/elf.h linux-2.6.32.46/arch/x86/include/asm/elf.h
8584--- linux-2.6.32.46/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8585+++ linux-2.6.32.46/arch/x86/include/asm/elf.h 2011-08-23 20:24:19.000000000 -0400
8586@@ -257,7 +257,25 @@ extern int force_personality32;
8587 the loader. We need to make sure that it is out of the way of the program
8588 that it will "exec", and that there is sufficient room for the brk. */
8589
8590+#ifdef CONFIG_PAX_SEGMEXEC
8591+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8592+#else
8593 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8594+#endif
8595+
8596+#ifdef CONFIG_PAX_ASLR
8597+#ifdef CONFIG_X86_32
8598+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8599+
8600+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8601+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8602+#else
8603+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8604+
8605+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8606+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8607+#endif
8608+#endif
8609
8610 /* This yields a mask that user programs can use to figure out what
8611 instruction set this CPU supports. This could be done in user space,
8612@@ -310,9 +328,7 @@ do { \
8613
8614 #define ARCH_DLINFO \
8615 do { \
8616- if (vdso_enabled) \
8617- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8618- (unsigned long)current->mm->context.vdso); \
8619+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8620 } while (0)
8621
8622 #define AT_SYSINFO 32
8623@@ -323,7 +339,7 @@ do { \
8624
8625 #endif /* !CONFIG_X86_32 */
8626
8627-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8628+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8629
8630 #define VDSO_ENTRY \
8631 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8632@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(s
8633 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8634 #define compat_arch_setup_additional_pages syscall32_setup_pages
8635
8636-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8637-#define arch_randomize_brk arch_randomize_brk
8638-
8639 #endif /* _ASM_X86_ELF_H */
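As a rough worked example of what the PAX_DELTA_* values above mean (illustrative, not part of the patch): they are bit counts of randomisation applied at page granularity, so the 16 bits used for mmap on plain i386 give 2^16 possible page-aligned offsets, i.e. 2^16 x 4 KiB = 256 MiB of spread for the mmap base, while the SEGMEXEC case drops to 15 bits (128 MiB) because the usable address space is halved. On x86_64, assuming the usual 47-bit user address space (TASK_SIZE_MAX_SHIFT = 47) and 4 KiB pages, TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 works out to 47 - 12 - 3 = 32 bits, i.e. roughly 16 TiB of possible mmap base positions.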
8640diff -urNp linux-2.6.32.46/arch/x86/include/asm/emergency-restart.h linux-2.6.32.46/arch/x86/include/asm/emergency-restart.h
8641--- linux-2.6.32.46/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8642+++ linux-2.6.32.46/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8643@@ -15,6 +15,6 @@ enum reboot_type {
8644
8645 extern enum reboot_type reboot_type;
8646
8647-extern void machine_emergency_restart(void);
8648+extern void machine_emergency_restart(void) __noreturn;
8649
8650 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8651diff -urNp linux-2.6.32.46/arch/x86/include/asm/futex.h linux-2.6.32.46/arch/x86/include/asm/futex.h
8652--- linux-2.6.32.46/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8653+++ linux-2.6.32.46/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8654@@ -12,16 +12,18 @@
8655 #include <asm/system.h>
8656
8657 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8658+ typecheck(u32 *, uaddr); \
8659 asm volatile("1:\t" insn "\n" \
8660 "2:\t.section .fixup,\"ax\"\n" \
8661 "3:\tmov\t%3, %1\n" \
8662 "\tjmp\t2b\n" \
8663 "\t.previous\n" \
8664 _ASM_EXTABLE(1b, 3b) \
8665- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8666+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8667 : "i" (-EFAULT), "0" (oparg), "1" (0))
8668
8669 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8670+ typecheck(u32 *, uaddr); \
8671 asm volatile("1:\tmovl %2, %0\n" \
8672 "\tmovl\t%0, %3\n" \
8673 "\t" insn "\n" \
8674@@ -34,10 +36,10 @@
8675 _ASM_EXTABLE(1b, 4b) \
8676 _ASM_EXTABLE(2b, 4b) \
8677 : "=&a" (oldval), "=&r" (ret), \
8678- "+m" (*uaddr), "=&r" (tem) \
8679+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8680 : "r" (oparg), "i" (-EFAULT), "1" (0))
8681
8682-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8683+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8684 {
8685 int op = (encoded_op >> 28) & 7;
8686 int cmp = (encoded_op >> 24) & 15;
8687@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8688
8689 switch (op) {
8690 case FUTEX_OP_SET:
8691- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8692+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8693 break;
8694 case FUTEX_OP_ADD:
8695- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8696+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8697 uaddr, oparg);
8698 break;
8699 case FUTEX_OP_OR:
8700@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8701 return ret;
8702 }
8703
8704-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8705+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8706 int newval)
8707 {
8708
8709@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8710 return -ENOSYS;
8711 #endif
8712
8713- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8714+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8715 return -EFAULT;
8716
8717- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8718+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8719 "2:\t.section .fixup, \"ax\"\n"
8720 "3:\tmov %2, %0\n"
8721 "\tjmp 2b\n"
8722 "\t.previous\n"
8723 _ASM_EXTABLE(1b, 3b)
8724- : "=a" (oldval), "+m" (*uaddr)
8725+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8726 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8727 : "memory"
8728 );
8729diff -urNp linux-2.6.32.46/arch/x86/include/asm/hw_irq.h linux-2.6.32.46/arch/x86/include/asm/hw_irq.h
8730--- linux-2.6.32.46/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8731+++ linux-2.6.32.46/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8732@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8733 extern void enable_IO_APIC(void);
8734
8735 /* Statistics */
8736-extern atomic_t irq_err_count;
8737-extern atomic_t irq_mis_count;
8738+extern atomic_unchecked_t irq_err_count;
8739+extern atomic_unchecked_t irq_mis_count;
8740
8741 /* EISA */
8742 extern void eisa_set_level_irq(unsigned int irq);
8743diff -urNp linux-2.6.32.46/arch/x86/include/asm/i387.h linux-2.6.32.46/arch/x86/include/asm/i387.h
8744--- linux-2.6.32.46/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8745+++ linux-2.6.32.46/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8746@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8747 {
8748 int err;
8749
8750+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8751+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8752+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8753+#endif
8754+
8755 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8756 "2:\n"
8757 ".section .fixup,\"ax\"\n"
8758@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8759 {
8760 int err;
8761
8762+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8763+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8764+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8765+#endif
8766+
8767 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8768 "2:\n"
8769 ".section .fixup,\"ax\"\n"
8770@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8771 }
8772
8773 /* We need a safe address that is cheap to find and that is already
8774- in L1 during context switch. The best choices are unfortunately
8775- different for UP and SMP */
8776-#ifdef CONFIG_SMP
8777-#define safe_address (__per_cpu_offset[0])
8778-#else
8779-#define safe_address (kstat_cpu(0).cpustat.user)
8780-#endif
8781+ in L1 during context switch. */
8782+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8783
8784 /*
8785 * These must be called with preempt disabled
8786@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8787 struct thread_info *me = current_thread_info();
8788 preempt_disable();
8789 if (me->status & TS_USEDFPU)
8790- __save_init_fpu(me->task);
8791+ __save_init_fpu(current);
8792 else
8793 clts();
8794 }
8795diff -urNp linux-2.6.32.46/arch/x86/include/asm/io_32.h linux-2.6.32.46/arch/x86/include/asm/io_32.h
8796--- linux-2.6.32.46/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8797+++ linux-2.6.32.46/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8798@@ -3,6 +3,7 @@
8799
8800 #include <linux/string.h>
8801 #include <linux/compiler.h>
8802+#include <asm/processor.h>
8803
8804 /*
8805 * This file contains the definitions for the x86 IO instructions
8806@@ -42,6 +43,17 @@
8807
8808 #ifdef __KERNEL__
8809
8810+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8811+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8812+{
8813+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8814+}
8815+
8816+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8817+{
8818+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8819+}
8820+
8821 #include <asm-generic/iomap.h>
8822
8823 #include <linux/vmalloc.h>
8824diff -urNp linux-2.6.32.46/arch/x86/include/asm/io_64.h linux-2.6.32.46/arch/x86/include/asm/io_64.h
8825--- linux-2.6.32.46/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8826+++ linux-2.6.32.46/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8827@@ -140,6 +140,17 @@ __OUTS(l)
8828
8829 #include <linux/vmalloc.h>
8830
8831+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8832+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8833+{
8834+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8835+}
8836+
8837+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8838+{
8839+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8840+}
8841+
8842 #include <asm-generic/iomap.h>
8843
8844 void __memcpy_fromio(void *, unsigned long, unsigned);
8845diff -urNp linux-2.6.32.46/arch/x86/include/asm/iommu.h linux-2.6.32.46/arch/x86/include/asm/iommu.h
8846--- linux-2.6.32.46/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8847+++ linux-2.6.32.46/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8848@@ -3,7 +3,7 @@
8849
8850 extern void pci_iommu_shutdown(void);
8851 extern void no_iommu_init(void);
8852-extern struct dma_map_ops nommu_dma_ops;
8853+extern const struct dma_map_ops nommu_dma_ops;
8854 extern int force_iommu, no_iommu;
8855 extern int iommu_detected;
8856 extern int iommu_pass_through;
8857diff -urNp linux-2.6.32.46/arch/x86/include/asm/irqflags.h linux-2.6.32.46/arch/x86/include/asm/irqflags.h
8858--- linux-2.6.32.46/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8859+++ linux-2.6.32.46/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8860@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8861 sti; \
8862 sysexit
8863
8864+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8865+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8866+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8867+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8868+
8869 #else
8870 #define INTERRUPT_RETURN iret
8871 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8872diff -urNp linux-2.6.32.46/arch/x86/include/asm/kprobes.h linux-2.6.32.46/arch/x86/include/asm/kprobes.h
8873--- linux-2.6.32.46/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8874+++ linux-2.6.32.46/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8875@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8876 #define BREAKPOINT_INSTRUCTION 0xcc
8877 #define RELATIVEJUMP_INSTRUCTION 0xe9
8878 #define MAX_INSN_SIZE 16
8879-#define MAX_STACK_SIZE 64
8880-#define MIN_STACK_SIZE(ADDR) \
8881- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8882- THREAD_SIZE - (unsigned long)(ADDR))) \
8883- ? (MAX_STACK_SIZE) \
8884- : (((unsigned long)current_thread_info()) + \
8885- THREAD_SIZE - (unsigned long)(ADDR)))
8886+#define MAX_STACK_SIZE 64UL
8887+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8888
8889 #define flush_insn_slot(p) do { } while (0)
8890
8891diff -urNp linux-2.6.32.46/arch/x86/include/asm/kvm_host.h linux-2.6.32.46/arch/x86/include/asm/kvm_host.h
8892--- linux-2.6.32.46/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8893+++ linux-2.6.32.46/arch/x86/include/asm/kvm_host.h 2011-08-26 20:19:09.000000000 -0400
8894@@ -534,9 +534,9 @@ struct kvm_x86_ops {
8895 bool (*gb_page_enable)(void);
8896
8897 const struct trace_print_flags *exit_reasons_str;
8898-};
8899+} __do_const;
8900
8901-extern struct kvm_x86_ops *kvm_x86_ops;
8902+extern const struct kvm_x86_ops *kvm_x86_ops;
8903
8904 int kvm_mmu_module_init(void);
8905 void kvm_mmu_module_exit(void);
8906diff -urNp linux-2.6.32.46/arch/x86/include/asm/local.h linux-2.6.32.46/arch/x86/include/asm/local.h
8907--- linux-2.6.32.46/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8908+++ linux-2.6.32.46/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8909@@ -18,26 +18,58 @@ typedef struct {
8910
8911 static inline void local_inc(local_t *l)
8912 {
8913- asm volatile(_ASM_INC "%0"
8914+ asm volatile(_ASM_INC "%0\n"
8915+
8916+#ifdef CONFIG_PAX_REFCOUNT
8917+ "jno 0f\n"
8918+ _ASM_DEC "%0\n"
8919+ "int $4\n0:\n"
8920+ _ASM_EXTABLE(0b, 0b)
8921+#endif
8922+
8923 : "+m" (l->a.counter));
8924 }
8925
8926 static inline void local_dec(local_t *l)
8927 {
8928- asm volatile(_ASM_DEC "%0"
8929+ asm volatile(_ASM_DEC "%0\n"
8930+
8931+#ifdef CONFIG_PAX_REFCOUNT
8932+ "jno 0f\n"
8933+ _ASM_INC "%0\n"
8934+ "int $4\n0:\n"
8935+ _ASM_EXTABLE(0b, 0b)
8936+#endif
8937+
8938 : "+m" (l->a.counter));
8939 }
8940
8941 static inline void local_add(long i, local_t *l)
8942 {
8943- asm volatile(_ASM_ADD "%1,%0"
8944+ asm volatile(_ASM_ADD "%1,%0\n"
8945+
8946+#ifdef CONFIG_PAX_REFCOUNT
8947+ "jno 0f\n"
8948+ _ASM_SUB "%1,%0\n"
8949+ "int $4\n0:\n"
8950+ _ASM_EXTABLE(0b, 0b)
8951+#endif
8952+
8953 : "+m" (l->a.counter)
8954 : "ir" (i));
8955 }
8956
8957 static inline void local_sub(long i, local_t *l)
8958 {
8959- asm volatile(_ASM_SUB "%1,%0"
8960+ asm volatile(_ASM_SUB "%1,%0\n"
8961+
8962+#ifdef CONFIG_PAX_REFCOUNT
8963+ "jno 0f\n"
8964+ _ASM_ADD "%1,%0\n"
8965+ "int $4\n0:\n"
8966+ _ASM_EXTABLE(0b, 0b)
8967+#endif
8968+
8969 : "+m" (l->a.counter)
8970 : "ir" (i));
8971 }
8972@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8973 {
8974 unsigned char c;
8975
8976- asm volatile(_ASM_SUB "%2,%0; sete %1"
8977+ asm volatile(_ASM_SUB "%2,%0\n"
8978+
8979+#ifdef CONFIG_PAX_REFCOUNT
8980+ "jno 0f\n"
8981+ _ASM_ADD "%2,%0\n"
8982+ "int $4\n0:\n"
8983+ _ASM_EXTABLE(0b, 0b)
8984+#endif
8985+
8986+ "sete %1\n"
8987 : "+m" (l->a.counter), "=qm" (c)
8988 : "ir" (i) : "memory");
8989 return c;
8990@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8991 {
8992 unsigned char c;
8993
8994- asm volatile(_ASM_DEC "%0; sete %1"
8995+ asm volatile(_ASM_DEC "%0\n"
8996+
8997+#ifdef CONFIG_PAX_REFCOUNT
8998+ "jno 0f\n"
8999+ _ASM_INC "%0\n"
9000+ "int $4\n0:\n"
9001+ _ASM_EXTABLE(0b, 0b)
9002+#endif
9003+
9004+ "sete %1\n"
9005 : "+m" (l->a.counter), "=qm" (c)
9006 : : "memory");
9007 return c != 0;
9008@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9009 {
9010 unsigned char c;
9011
9012- asm volatile(_ASM_INC "%0; sete %1"
9013+ asm volatile(_ASM_INC "%0\n"
9014+
9015+#ifdef CONFIG_PAX_REFCOUNT
9016+ "jno 0f\n"
9017+ _ASM_DEC "%0\n"
9018+ "int $4\n0:\n"
9019+ _ASM_EXTABLE(0b, 0b)
9020+#endif
9021+
9022+ "sete %1\n"
9023 : "+m" (l->a.counter), "=qm" (c)
9024 : : "memory");
9025 return c != 0;
9026@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9027 {
9028 unsigned char c;
9029
9030- asm volatile(_ASM_ADD "%2,%0; sets %1"
9031+ asm volatile(_ASM_ADD "%2,%0\n"
9032+
9033+#ifdef CONFIG_PAX_REFCOUNT
9034+ "jno 0f\n"
9035+ _ASM_SUB "%2,%0\n"
9036+ "int $4\n0:\n"
9037+ _ASM_EXTABLE(0b, 0b)
9038+#endif
9039+
9040+ "sets %1\n"
9041 : "+m" (l->a.counter), "=qm" (c)
9042 : "ir" (i) : "memory");
9043 return c;
9044@@ -133,7 +201,15 @@ static inline long local_add_return(long
9045 #endif
9046 /* Modern 486+ processor */
9047 __i = i;
9048- asm volatile(_ASM_XADD "%0, %1;"
9049+ asm volatile(_ASM_XADD "%0, %1\n"
9050+
9051+#ifdef CONFIG_PAX_REFCOUNT
9052+ "jno 0f\n"
9053+ _ASM_MOV "%0,%1\n"
9054+ "int $4\n0:\n"
9055+ _ASM_EXTABLE(0b, 0b)
9056+#endif
9057+
9058 : "+r" (i), "+m" (l->a.counter)
9059 : : "memory");
9060 return i + __i;
9061diff -urNp linux-2.6.32.46/arch/x86/include/asm/microcode.h linux-2.6.32.46/arch/x86/include/asm/microcode.h
9062--- linux-2.6.32.46/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
9063+++ linux-2.6.32.46/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
9064@@ -12,13 +12,13 @@ struct device;
9065 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9066
9067 struct microcode_ops {
9068- enum ucode_state (*request_microcode_user) (int cpu,
9069+ enum ucode_state (* const request_microcode_user) (int cpu,
9070 const void __user *buf, size_t size);
9071
9072- enum ucode_state (*request_microcode_fw) (int cpu,
9073+ enum ucode_state (* const request_microcode_fw) (int cpu,
9074 struct device *device);
9075
9076- void (*microcode_fini_cpu) (int cpu);
9077+ void (* const microcode_fini_cpu) (int cpu);
9078
9079 /*
9080 * The generic 'microcode_core' part guarantees that
9081@@ -38,18 +38,18 @@ struct ucode_cpu_info {
9082 extern struct ucode_cpu_info ucode_cpu_info[];
9083
9084 #ifdef CONFIG_MICROCODE_INTEL
9085-extern struct microcode_ops * __init init_intel_microcode(void);
9086+extern const struct microcode_ops * __init init_intel_microcode(void);
9087 #else
9088-static inline struct microcode_ops * __init init_intel_microcode(void)
9089+static inline const struct microcode_ops * __init init_intel_microcode(void)
9090 {
9091 return NULL;
9092 }
9093 #endif /* CONFIG_MICROCODE_INTEL */
9094
9095 #ifdef CONFIG_MICROCODE_AMD
9096-extern struct microcode_ops * __init init_amd_microcode(void);
9097+extern const struct microcode_ops * __init init_amd_microcode(void);
9098 #else
9099-static inline struct microcode_ops * __init init_amd_microcode(void)
9100+static inline const struct microcode_ops * __init init_amd_microcode(void)
9101 {
9102 return NULL;
9103 }
9104diff -urNp linux-2.6.32.46/arch/x86/include/asm/mman.h linux-2.6.32.46/arch/x86/include/asm/mman.h
9105--- linux-2.6.32.46/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
9106+++ linux-2.6.32.46/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
9107@@ -5,4 +5,14 @@
9108
9109 #include <asm-generic/mman.h>
9110
9111+#ifdef __KERNEL__
9112+#ifndef __ASSEMBLY__
9113+#ifdef CONFIG_X86_32
9114+#define arch_mmap_check i386_mmap_check
9115+int i386_mmap_check(unsigned long addr, unsigned long len,
9116+ unsigned long flags);
9117+#endif
9118+#endif
9119+#endif
9120+
9121 #endif /* _ASM_X86_MMAN_H */
9122diff -urNp linux-2.6.32.46/arch/x86/include/asm/mmu_context.h linux-2.6.32.46/arch/x86/include/asm/mmu_context.h
9123--- linux-2.6.32.46/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9124+++ linux-2.6.32.46/arch/x86/include/asm/mmu_context.h 2011-08-23 20:24:19.000000000 -0400
9125@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *m
9126
9127 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9128 {
9129+
9130+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9131+ unsigned int i;
9132+ pgd_t *pgd;
9133+
9134+ pax_open_kernel();
9135+ pgd = get_cpu_pgd(smp_processor_id());
9136+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9137+ set_pgd_batched(pgd+i, native_make_pgd(0));
9138+ pax_close_kernel();
9139+#endif
9140+
9141 #ifdef CONFIG_SMP
9142 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9143 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9144@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_s
9145 struct task_struct *tsk)
9146 {
9147 unsigned cpu = smp_processor_id();
9148+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9149+ int tlbstate = TLBSTATE_OK;
9150+#endif
9151
9152 if (likely(prev != next)) {
9153 #ifdef CONFIG_SMP
9154+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9155+ tlbstate = percpu_read(cpu_tlbstate.state);
9156+#endif
9157 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9158 percpu_write(cpu_tlbstate.active_mm, next);
9159 #endif
9160 cpumask_set_cpu(cpu, mm_cpumask(next));
9161
9162 /* Re-load page tables */
9163+#ifdef CONFIG_PAX_PER_CPU_PGD
9164+ pax_open_kernel();
9165+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9166+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9167+ pax_close_kernel();
9168+ load_cr3(get_cpu_pgd(cpu));
9169+#else
9170 load_cr3(next->pgd);
9171+#endif
9172
9173 /* stop flush ipis for the previous mm */
9174 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9175@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_s
9176 */
9177 if (unlikely(prev->context.ldt != next->context.ldt))
9178 load_LDT_nolock(&next->context);
9179- }
9180+
9181+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9182+ if (!nx_enabled) {
9183+ smp_mb__before_clear_bit();
9184+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9185+ smp_mb__after_clear_bit();
9186+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9187+ }
9188+#endif
9189+
9190+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9191+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9192+ prev->context.user_cs_limit != next->context.user_cs_limit))
9193+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9194 #ifdef CONFIG_SMP
9195+ else if (unlikely(tlbstate != TLBSTATE_OK))
9196+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9197+#endif
9198+#endif
9199+
9200+ }
9201 else {
9202+
9203+#ifdef CONFIG_PAX_PER_CPU_PGD
9204+ pax_open_kernel();
9205+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9206+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9207+ pax_close_kernel();
9208+ load_cr3(get_cpu_pgd(cpu));
9209+#endif
9210+
9211+#ifdef CONFIG_SMP
9212 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9213 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9214
9215@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_s
9216 * tlb flush IPI delivery. We must reload CR3
9217 * to make sure to use no freed page tables.
9218 */
9219+
9220+#ifndef CONFIG_PAX_PER_CPU_PGD
9221 load_cr3(next->pgd);
9222+#endif
9223+
9224 load_LDT_nolock(&next->context);
9225+
9226+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9227+ if (!nx_enabled)
9228+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9229+#endif
9230+
9231+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9232+#ifdef CONFIG_PAX_PAGEEXEC
9233+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9234+#endif
9235+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9236+#endif
9237+
9238 }
9239- }
9240 #endif
9241+ }
9242 }
9243
9244 #define activate_mm(prev, next) \
9245diff -urNp linux-2.6.32.46/arch/x86/include/asm/mmu.h linux-2.6.32.46/arch/x86/include/asm/mmu.h
9246--- linux-2.6.32.46/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9247+++ linux-2.6.32.46/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9248@@ -9,10 +9,23 @@
9249 * we put the segment information here.
9250 */
9251 typedef struct {
9252- void *ldt;
9253+ struct desc_struct *ldt;
9254 int size;
9255 struct mutex lock;
9256- void *vdso;
9257+ unsigned long vdso;
9258+
9259+#ifdef CONFIG_X86_32
9260+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9261+ unsigned long user_cs_base;
9262+ unsigned long user_cs_limit;
9263+
9264+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9265+ cpumask_t cpu_user_cs_mask;
9266+#endif
9267+
9268+#endif
9269+#endif
9270+
9271 } mm_context_t;
9272
9273 #ifdef CONFIG_SMP
9274diff -urNp linux-2.6.32.46/arch/x86/include/asm/module.h linux-2.6.32.46/arch/x86/include/asm/module.h
9275--- linux-2.6.32.46/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9276+++ linux-2.6.32.46/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9277@@ -5,6 +5,7 @@
9278
9279 #ifdef CONFIG_X86_64
9280 /* X86_64 does not define MODULE_PROC_FAMILY */
9281+#define MODULE_PROC_FAMILY ""
9282 #elif defined CONFIG_M386
9283 #define MODULE_PROC_FAMILY "386 "
9284 #elif defined CONFIG_M486
9285@@ -59,13 +60,36 @@
9286 #error unknown processor family
9287 #endif
9288
9289-#ifdef CONFIG_X86_32
9290-# ifdef CONFIG_4KSTACKS
9291-# define MODULE_STACKSIZE "4KSTACKS "
9292-# else
9293-# define MODULE_STACKSIZE ""
9294-# endif
9295-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9296+#ifdef CONFIG_PAX_MEMORY_UDEREF
9297+#define MODULE_PAX_UDEREF "UDEREF "
9298+#else
9299+#define MODULE_PAX_UDEREF ""
9300+#endif
9301+
9302+#ifdef CONFIG_PAX_KERNEXEC
9303+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9304+#else
9305+#define MODULE_PAX_KERNEXEC ""
9306+#endif
9307+
9308+#ifdef CONFIG_PAX_REFCOUNT
9309+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9310+#else
9311+#define MODULE_PAX_REFCOUNT ""
9312 #endif
9313
9314+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9315+#define MODULE_STACKSIZE "4KSTACKS "
9316+#else
9317+#define MODULE_STACKSIZE ""
9318+#endif
9319+
9320+#ifdef CONFIG_GRKERNSEC
9321+#define MODULE_GRSEC "GRSECURITY "
9322+#else
9323+#define MODULE_GRSEC ""
9324+#endif
9325+
9326+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9327+
9328 #endif /* _ASM_X86_MODULE_H */
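A quick worked example of the vermagic change above (not part of the patch): on an x86_64 build with CONFIG_GRKERNSEC, CONFIG_PAX_KERNEXEC, CONFIG_PAX_MEMORY_UDEREF and CONFIG_PAX_REFCOUNT enabled, MODULE_PROC_FAMILY and MODULE_STACKSIZE both expand to empty strings, so MODULE_ARCH_VERMAGIC becomes "GRSECURITY KERNEXEC UDEREF REFCOUNT ". Because vermagic strings must match exactly at module load time, a module built against a kernel without these options will be refused by such a kernel (and vice versa), which is the point of encoding the options here.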
9329diff -urNp linux-2.6.32.46/arch/x86/include/asm/page_64_types.h linux-2.6.32.46/arch/x86/include/asm/page_64_types.h
9330--- linux-2.6.32.46/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9331+++ linux-2.6.32.46/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9332@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9333
9334 /* duplicated to the one in bootmem.h */
9335 extern unsigned long max_pfn;
9336-extern unsigned long phys_base;
9337+extern const unsigned long phys_base;
9338
9339 extern unsigned long __phys_addr(unsigned long);
9340 #define __phys_reloc_hide(x) (x)
9341diff -urNp linux-2.6.32.46/arch/x86/include/asm/paravirt.h linux-2.6.32.46/arch/x86/include/asm/paravirt.h
9342--- linux-2.6.32.46/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9343+++ linux-2.6.32.46/arch/x86/include/asm/paravirt.h 2011-08-23 21:36:48.000000000 -0400
9344@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp,
9345 val);
9346 }
9347
9348+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9349+{
9350+ pgdval_t val = native_pgd_val(pgd);
9351+
9352+ if (sizeof(pgdval_t) > sizeof(long))
9353+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9354+ val, (u64)val >> 32);
9355+ else
9356+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9357+ val);
9358+}
9359+
9360 static inline void pgd_clear(pgd_t *pgdp)
9361 {
9362 set_pgd(pgdp, __pgd(0));
9363@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned
9364 pv_mmu_ops.set_fixmap(idx, phys, flags);
9365 }
9366
9367+#ifdef CONFIG_PAX_KERNEXEC
9368+static inline unsigned long pax_open_kernel(void)
9369+{
9370+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9371+}
9372+
9373+static inline unsigned long pax_close_kernel(void)
9374+{
9375+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9376+}
9377+#else
9378+static inline unsigned long pax_open_kernel(void) { return 0; }
9379+static inline unsigned long pax_close_kernel(void) { return 0; }
9380+#endif
9381+
9382 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9383
9384 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9385@@ -945,7 +972,7 @@ extern void default_banner(void);
9386
9387 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9388 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9389-#define PARA_INDIRECT(addr) *%cs:addr
9390+#define PARA_INDIRECT(addr) *%ss:addr
9391 #endif
9392
9393 #define INTERRUPT_RETURN \
9394@@ -1022,6 +1049,21 @@ extern void default_banner(void);
9395 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9396 CLBR_NONE, \
9397 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9398+
9399+#define GET_CR0_INTO_RDI \
9400+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9401+ mov %rax,%rdi
9402+
9403+#define SET_RDI_INTO_CR0 \
9404+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9405+
9406+#define GET_CR3_INTO_RDI \
9407+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9408+ mov %rax,%rdi
9409+
9410+#define SET_RDI_INTO_CR3 \
9411+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9412+
9413 #endif /* CONFIG_X86_32 */
9414
9415 #endif /* __ASSEMBLY__ */
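
A rough userspace model (invented names throughout) of the pattern the paravirt hunks follow: pax_open_kernel/pax_close_kernel become entries in an ops table of function pointers, with static inline wrappers that dispatch through the table, and no-op fallbacks when KERNEXEC is disabled.

#include <stdio.h>

struct demo_mmu_ops {
        unsigned long (*open_kernel)(void);
        unsigned long (*close_kernel)(void);
};

static unsigned long native_open_kernel(void)
{
        puts("write protection lifted");
        return 0;
}

static unsigned long native_close_kernel(void)
{
        puts("write protection restored");
        return 0;
}

/* in the kernel this table is filled in per hypervisor; here it is global */
static struct demo_mmu_ops demo_ops = {
        .open_kernel  = native_open_kernel,
        .close_kernel = native_close_kernel,
};

static inline unsigned long pax_open_kernel(void)  { return demo_ops.open_kernel(); }
static inline unsigned long pax_close_kernel(void) { return demo_ops.close_kernel(); }

int main(void)
{
        pax_open_kernel();
        /* ... a write to otherwise read-only kernel data would go here ... */
        pax_close_kernel();
        return 0;
}
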
9416diff -urNp linux-2.6.32.46/arch/x86/include/asm/paravirt_types.h linux-2.6.32.46/arch/x86/include/asm/paravirt_types.h
9417--- linux-2.6.32.46/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9418+++ linux-2.6.32.46/arch/x86/include/asm/paravirt_types.h 2011-08-23 20:24:19.000000000 -0400
9419@@ -78,19 +78,19 @@ struct pv_init_ops {
9420 */
9421 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9422 unsigned long addr, unsigned len);
9423-};
9424+} __no_const;
9425
9426
9427 struct pv_lazy_ops {
9428 /* Set deferred update mode, used for batching operations. */
9429 void (*enter)(void);
9430 void (*leave)(void);
9431-};
9432+} __no_const;
9433
9434 struct pv_time_ops {
9435 unsigned long long (*sched_clock)(void);
9436 unsigned long (*get_tsc_khz)(void);
9437-};
9438+} __no_const;
9439
9440 struct pv_cpu_ops {
9441 /* hooks for various privileged instructions */
9442@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9443
9444 void (*start_context_switch)(struct task_struct *prev);
9445 void (*end_context_switch)(struct task_struct *next);
9446-};
9447+} __no_const;
9448
9449 struct pv_irq_ops {
9450 /*
9451@@ -217,7 +217,7 @@ struct pv_apic_ops {
9452 unsigned long start_eip,
9453 unsigned long start_esp);
9454 #endif
9455-};
9456+} __no_const;
9457
9458 struct pv_mmu_ops {
9459 unsigned long (*read_cr2)(void);
9460@@ -301,6 +301,7 @@ struct pv_mmu_ops {
9461 struct paravirt_callee_save make_pud;
9462
9463 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9464+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9465 #endif /* PAGETABLE_LEVELS == 4 */
9466 #endif /* PAGETABLE_LEVELS >= 3 */
9467
9468@@ -316,6 +317,12 @@ struct pv_mmu_ops {
9469 an mfn. We can tell which is which from the index. */
9470 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9471 phys_addr_t phys, pgprot_t flags);
9472+
9473+#ifdef CONFIG_PAX_KERNEXEC
9474+ unsigned long (*pax_open_kernel)(void);
9475+ unsigned long (*pax_close_kernel)(void);
9476+#endif
9477+
9478 };
9479
9480 struct raw_spinlock;
9481@@ -326,7 +333,7 @@ struct pv_lock_ops {
9482 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9483 int (*spin_trylock)(struct raw_spinlock *lock);
9484 void (*spin_unlock)(struct raw_spinlock *lock);
9485-};
9486+} __no_const;
9487
9488 /* This contains all the paravirt structures: we get a convenient
9489 * number for each function using the offset which we use to indicate
9490diff -urNp linux-2.6.32.46/arch/x86/include/asm/pci_x86.h linux-2.6.32.46/arch/x86/include/asm/pci_x86.h
9491--- linux-2.6.32.46/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9492+++ linux-2.6.32.46/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9493@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9494 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9495
9496 struct pci_raw_ops {
9497- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9498+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9499 int reg, int len, u32 *val);
9500- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9501+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9502 int reg, int len, u32 val);
9503 };
9504
9505-extern struct pci_raw_ops *raw_pci_ops;
9506-extern struct pci_raw_ops *raw_pci_ext_ops;
9507+extern const struct pci_raw_ops *raw_pci_ops;
9508+extern const struct pci_raw_ops *raw_pci_ext_ops;
9509
9510-extern struct pci_raw_ops pci_direct_conf1;
9511+extern const struct pci_raw_ops pci_direct_conf1;
9512 extern bool port_cf9_safe;
9513
9514 /* arch_initcall level */
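
The pci_x86.h hunk above constifies a function-pointer table; the sketch below (userspace, invented names) shows the idea: a const-qualified ops structure lands in a read-only section, so the read/write hooks cannot be silently overwritten at run time.

#include <stdio.h>

struct demo_raw_ops {
        int (*read)(unsigned int reg, unsigned int *val);
        int (*write)(unsigned int reg, unsigned int val);
};

static int demo_read(unsigned int reg, unsigned int *val)  { *val = reg ^ 0xffu; return 0; }
static int demo_write(unsigned int reg, unsigned int val)  { (void)reg; (void)val; return 0; }

/* 'const' places the table in .rodata; assigning to demo_ops.read later
 * would be rejected at compile time, and a runtime store would fault */
static const struct demo_raw_ops demo_ops = {
        .read  = demo_read,
        .write = demo_write,
};

int main(void)
{
        unsigned int v;
        demo_ops.read(0x42, &v);
        printf("read back 0x%x\n", v);
        return 0;
}
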
9515diff -urNp linux-2.6.32.46/arch/x86/include/asm/percpu.h linux-2.6.32.46/arch/x86/include/asm/percpu.h
9516--- linux-2.6.32.46/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9517+++ linux-2.6.32.46/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9518@@ -78,6 +78,7 @@ do { \
9519 if (0) { \
9520 T__ tmp__; \
9521 tmp__ = (val); \
9522+ (void)tmp__; \
9523 } \
9524 switch (sizeof(var)) { \
9525 case 1: \
9526diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgalloc.h linux-2.6.32.46/arch/x86/include/asm/pgalloc.h
9527--- linux-2.6.32.46/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9528+++ linux-2.6.32.46/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9529@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9530 pmd_t *pmd, pte_t *pte)
9531 {
9532 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9533+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9534+}
9535+
9536+static inline void pmd_populate_user(struct mm_struct *mm,
9537+ pmd_t *pmd, pte_t *pte)
9538+{
9539+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9540 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9541 }
9542
9543diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.46/arch/x86/include/asm/pgtable-2level.h
9544--- linux-2.6.32.46/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9545+++ linux-2.6.32.46/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9546@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9547
9548 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9549 {
9550+ pax_open_kernel();
9551 *pmdp = pmd;
9552+ pax_close_kernel();
9553 }
9554
9555 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9556diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable_32.h linux-2.6.32.46/arch/x86/include/asm/pgtable_32.h
9557--- linux-2.6.32.46/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9558+++ linux-2.6.32.46/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9559@@ -26,9 +26,6 @@
9560 struct mm_struct;
9561 struct vm_area_struct;
9562
9563-extern pgd_t swapper_pg_dir[1024];
9564-extern pgd_t trampoline_pg_dir[1024];
9565-
9566 static inline void pgtable_cache_init(void) { }
9567 static inline void check_pgt_cache(void) { }
9568 void paging_init(void);
9569@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9570 # include <asm/pgtable-2level.h>
9571 #endif
9572
9573+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9574+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9575+#ifdef CONFIG_X86_PAE
9576+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9577+#endif
9578+
9579 #if defined(CONFIG_HIGHPTE)
9580 #define __KM_PTE \
9581 (in_nmi() ? KM_NMI_PTE : \
9582@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9583 /* Clear a kernel PTE and flush it from the TLB */
9584 #define kpte_clear_flush(ptep, vaddr) \
9585 do { \
9586+ pax_open_kernel(); \
9587 pte_clear(&init_mm, (vaddr), (ptep)); \
9588+ pax_close_kernel(); \
9589 __flush_tlb_one((vaddr)); \
9590 } while (0)
9591
9592@@ -85,6 +90,9 @@ do { \
9593
9594 #endif /* !__ASSEMBLY__ */
9595
9596+#define HAVE_ARCH_UNMAPPED_AREA
9597+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9598+
9599 /*
9600 * kern_addr_valid() is (1) for FLATMEM and (0) for
9601 * SPARSEMEM and DISCONTIGMEM
9602diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.46/arch/x86/include/asm/pgtable_32_types.h
9603--- linux-2.6.32.46/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9604+++ linux-2.6.32.46/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9605@@ -8,7 +8,7 @@
9606 */
9607 #ifdef CONFIG_X86_PAE
9608 # include <asm/pgtable-3level_types.h>
9609-# define PMD_SIZE (1UL << PMD_SHIFT)
9610+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9611 # define PMD_MASK (~(PMD_SIZE - 1))
9612 #else
9613 # include <asm/pgtable-2level_types.h>
9614@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9615 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9616 #endif
9617
9618+#ifdef CONFIG_PAX_KERNEXEC
9619+#ifndef __ASSEMBLY__
9620+extern unsigned char MODULES_EXEC_VADDR[];
9621+extern unsigned char MODULES_EXEC_END[];
9622+#endif
9623+#include <asm/boot.h>
9624+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9625+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9626+#else
9627+#define ktla_ktva(addr) (addr)
9628+#define ktva_ktla(addr) (addr)
9629+#endif
9630+
9631 #define MODULES_VADDR VMALLOC_START
9632 #define MODULES_END VMALLOC_END
9633 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
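
The ktla_ktva()/ktva_ktla() pair added above is a fixed-offset translation between two views of the kernel text; the only property callers rely on is that the two macros invert each other. A small check with made-up constants:

#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_OFFSET         0xc0000000ul   /* placeholder */
#define DEMO_LOAD_PHYSICAL_ADDR  0x01000000ul   /* placeholder */

#define ktla_ktva(addr) ((addr) + DEMO_LOAD_PHYSICAL_ADDR + DEMO_PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - DEMO_LOAD_PHYSICAL_ADDR - DEMO_PAGE_OFFSET)

int main(void)
{
        unsigned long ktla = 0x00100000ul;       /* arbitrary sample address */
        unsigned long ktva = ktla_ktva(ktla);

        /* ktva_ktla() must undo ktla_ktva(), otherwise address lookups on
         * kernel text would resolve to the wrong mapping */
        assert(ktva_ktla(ktva) == ktla);
        printf("ktla=%#lx -> ktva=%#lx\n", ktla, ktva);
        return 0;
}
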
9634diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.46/arch/x86/include/asm/pgtable-3level.h
9635--- linux-2.6.32.46/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9636+++ linux-2.6.32.46/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9637@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9638
9639 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9640 {
9641+ pax_open_kernel();
9642 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9643+ pax_close_kernel();
9644 }
9645
9646 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9647 {
9648+ pax_open_kernel();
9649 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9650+ pax_close_kernel();
9651 }
9652
9653 /*
9654diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable_64.h linux-2.6.32.46/arch/x86/include/asm/pgtable_64.h
9655--- linux-2.6.32.46/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9656+++ linux-2.6.32.46/arch/x86/include/asm/pgtable_64.h 2011-08-23 20:24:19.000000000 -0400
9657@@ -16,10 +16,13 @@
9658
9659 extern pud_t level3_kernel_pgt[512];
9660 extern pud_t level3_ident_pgt[512];
9661+extern pud_t level3_vmalloc_pgt[512];
9662+extern pud_t level3_vmemmap_pgt[512];
9663+extern pud_t level2_vmemmap_pgt[512];
9664 extern pmd_t level2_kernel_pgt[512];
9665 extern pmd_t level2_fixmap_pgt[512];
9666-extern pmd_t level2_ident_pgt[512];
9667-extern pgd_t init_level4_pgt[];
9668+extern pmd_t level2_ident_pgt[512*2];
9669+extern pgd_t init_level4_pgt[512];
9670
9671 #define swapper_pg_dir init_level4_pgt
9672
9673@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9674
9675 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9676 {
9677+ pax_open_kernel();
9678 *pmdp = pmd;
9679+ pax_close_kernel();
9680 }
9681
9682 static inline void native_pmd_clear(pmd_t *pmd)
9683@@ -94,6 +99,13 @@ static inline void native_pud_clear(pud_
9684
9685 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9686 {
9687+ pax_open_kernel();
9688+ *pgdp = pgd;
9689+ pax_close_kernel();
9690+}
9691+
9692+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9693+{
9694 *pgdp = pgd;
9695 }
9696
9697diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.46/arch/x86/include/asm/pgtable_64_types.h
9698--- linux-2.6.32.46/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9699+++ linux-2.6.32.46/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9700@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9701 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9702 #define MODULES_END _AC(0xffffffffff000000, UL)
9703 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9704+#define MODULES_EXEC_VADDR MODULES_VADDR
9705+#define MODULES_EXEC_END MODULES_END
9706+
9707+#define ktla_ktva(addr) (addr)
9708+#define ktva_ktla(addr) (addr)
9709
9710 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9711diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable.h linux-2.6.32.46/arch/x86/include/asm/pgtable.h
9712--- linux-2.6.32.46/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9713+++ linux-2.6.32.46/arch/x86/include/asm/pgtable.h 2011-08-23 20:24:19.000000000 -0400
9714@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
9715
9716 #ifndef __PAGETABLE_PUD_FOLDED
9717 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9718+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9719 #define pgd_clear(pgd) native_pgd_clear(pgd)
9720 #endif
9721
9722@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
9723
9724 #define arch_end_context_switch(prev) do {} while(0)
9725
9726+#define pax_open_kernel() native_pax_open_kernel()
9727+#define pax_close_kernel() native_pax_close_kernel()
9728 #endif /* CONFIG_PARAVIRT */
9729
9730+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9731+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9732+
9733+#ifdef CONFIG_PAX_KERNEXEC
9734+static inline unsigned long native_pax_open_kernel(void)
9735+{
9736+ unsigned long cr0;
9737+
9738+ preempt_disable();
9739+ barrier();
9740+ cr0 = read_cr0() ^ X86_CR0_WP;
9741+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9742+ write_cr0(cr0);
9743+ return cr0 ^ X86_CR0_WP;
9744+}
9745+
9746+static inline unsigned long native_pax_close_kernel(void)
9747+{
9748+ unsigned long cr0;
9749+
9750+ cr0 = read_cr0() ^ X86_CR0_WP;
9751+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9752+ write_cr0(cr0);
9753+ barrier();
9754+ preempt_enable_no_resched();
9755+ return cr0 ^ X86_CR0_WP;
9756+}
9757+#else
9758+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9759+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9760+#endif
9761+
9762 /*
9763 * The following only work if pte_present() is true.
9764 * Undefined behaviour if not..
9765 */
9766+static inline int pte_user(pte_t pte)
9767+{
9768+ return pte_val(pte) & _PAGE_USER;
9769+}
9770+
9771 static inline int pte_dirty(pte_t pte)
9772 {
9773 return pte_flags(pte) & _PAGE_DIRTY;
9774@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t
9775 return pte_clear_flags(pte, _PAGE_RW);
9776 }
9777
9778+static inline pte_t pte_mkread(pte_t pte)
9779+{
9780+ return __pte(pte_val(pte) | _PAGE_USER);
9781+}
9782+
9783 static inline pte_t pte_mkexec(pte_t pte)
9784 {
9785- return pte_clear_flags(pte, _PAGE_NX);
9786+#ifdef CONFIG_X86_PAE
9787+ if (__supported_pte_mask & _PAGE_NX)
9788+ return pte_clear_flags(pte, _PAGE_NX);
9789+ else
9790+#endif
9791+ return pte_set_flags(pte, _PAGE_USER);
9792+}
9793+
9794+static inline pte_t pte_exprotect(pte_t pte)
9795+{
9796+#ifdef CONFIG_X86_PAE
9797+ if (__supported_pte_mask & _PAGE_NX)
9798+ return pte_set_flags(pte, _PAGE_NX);
9799+ else
9800+#endif
9801+ return pte_clear_flags(pte, _PAGE_USER);
9802 }
9803
9804 static inline pte_t pte_mkdirty(pte_t pte)
9805@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long
9806 #endif
9807
9808 #ifndef __ASSEMBLY__
9809+
9810+#ifdef CONFIG_PAX_PER_CPU_PGD
9811+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9812+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9813+{
9814+ return cpu_pgd[cpu];
9815+}
9816+#endif
9817+
9818 #include <linux/mm_types.h>
9819
9820 static inline int pte_none(pte_t pte)
9821@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *p
9822
9823 static inline int pgd_bad(pgd_t pgd)
9824 {
9825- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9826+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9827 }
9828
9829 static inline int pgd_none(pgd_t pgd)
9830@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
9831 * pgd_offset() returns a (pgd_t *)
9832 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9833 */
9834-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9835+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9836+
9837+#ifdef CONFIG_PAX_PER_CPU_PGD
9838+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9839+#endif
9840+
9841 /*
9842 * a shortcut which implies the use of the kernel's pgd, instead
9843 * of a process's
9844@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
9845 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9846 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9847
9848+#ifdef CONFIG_X86_32
9849+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9850+#else
9851+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9852+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9853+
9854+#ifdef CONFIG_PAX_MEMORY_UDEREF
9855+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9856+#else
9857+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9858+#endif
9859+
9860+#endif
9861+
9862 #ifndef __ASSEMBLY__
9863
9864 extern int direct_gbpages;
9865@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(st
9866 * dst and src can be on the same page, but the range must not overlap,
9867 * and must not cross a page boundary.
9868 */
9869-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9870+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9871 {
9872- memcpy(dst, src, count * sizeof(pgd_t));
9873+ pax_open_kernel();
9874+ while (count--)
9875+ *dst++ = *src++;
9876+ pax_close_kernel();
9877 }
9878
9879+#ifdef CONFIG_PAX_PER_CPU_PGD
9880+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9881+#endif
9882+
9883+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9884+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9885+#else
9886+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9887+#endif
9888
9889 #include <asm-generic/pgtable.h>
9890 #endif /* __ASSEMBLY__ */
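
A userspace stand-in for the CR0.WP toggling done by native_pax_open_kernel()/native_pax_close_kernel() above. The real code reads and writes the CR0 register with preemption disabled; here an ordinary variable plays the role of CR0 so the XOR-and-check pairing can be exercised.

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1ul << 16)                /* write-protect bit in CR0 */

static unsigned long fake_cr0 = X86_CR0_WP;   /* WP is normally set */

static unsigned long demo_open_kernel(void)
{
        unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
        assert(!(cr0 & X86_CR0_WP));          /* must not already be open */
        fake_cr0 = cr0;                       /* kernel text/rodata now writable */
        return cr0 ^ X86_CR0_WP;
}

static unsigned long demo_close_kernel(void)
{
        unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;
        assert(cr0 & X86_CR0_WP);             /* must not already be closed */
        fake_cr0 = cr0;                       /* write protection re-armed */
        return cr0 ^ X86_CR0_WP;
}

int main(void)
{
        demo_open_kernel();
        /* ... the caller would patch a page-table entry here ... */
        demo_close_kernel();
        printf("final WP state: %s\n", (fake_cr0 & X86_CR0_WP) ? "set" : "clear");
        return 0;
}
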
9891diff -urNp linux-2.6.32.46/arch/x86/include/asm/pgtable_types.h linux-2.6.32.46/arch/x86/include/asm/pgtable_types.h
9892--- linux-2.6.32.46/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9893+++ linux-2.6.32.46/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9894@@ -16,12 +16,11 @@
9895 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9896 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9897 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9898-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9899+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9900 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9901 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9902 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9903-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9904-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9905+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9906 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9907
9908 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9909@@ -39,7 +38,6 @@
9910 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9911 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9912 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9913-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9914 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9915 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9916 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9917@@ -55,8 +53,10 @@
9918
9919 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9920 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9921-#else
9922+#elif defined(CONFIG_KMEMCHECK)
9923 #define _PAGE_NX (_AT(pteval_t, 0))
9924+#else
9925+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9926 #endif
9927
9928 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9929@@ -93,6 +93,9 @@
9930 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9931 _PAGE_ACCESSED)
9932
9933+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9934+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9935+
9936 #define __PAGE_KERNEL_EXEC \
9937 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9938 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9939@@ -103,8 +106,8 @@
9940 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9941 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9942 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9943-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9944-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9945+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9946+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9947 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9948 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9949 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9950@@ -163,8 +166,8 @@
9951 * bits are combined, this will alow user to access the high address mapped
9952 * VDSO in the presence of CONFIG_COMPAT_VDSO
9953 */
9954-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9955-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9956+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9957+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9958 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9959 #endif
9960
9961@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9962 {
9963 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9964 }
9965+#endif
9966
9967+#if PAGETABLE_LEVELS == 3
9968+#include <asm-generic/pgtable-nopud.h>
9969+#endif
9970+
9971+#if PAGETABLE_LEVELS == 2
9972+#include <asm-generic/pgtable-nopmd.h>
9973+#endif
9974+
9975+#ifndef __ASSEMBLY__
9976 #if PAGETABLE_LEVELS > 3
9977 typedef struct { pudval_t pud; } pud_t;
9978
9979@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9980 return pud.pud;
9981 }
9982 #else
9983-#include <asm-generic/pgtable-nopud.h>
9984-
9985 static inline pudval_t native_pud_val(pud_t pud)
9986 {
9987 return native_pgd_val(pud.pgd);
9988@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9989 return pmd.pmd;
9990 }
9991 #else
9992-#include <asm-generic/pgtable-nopmd.h>
9993-
9994 static inline pmdval_t native_pmd_val(pmd_t pmd)
9995 {
9996 return native_pgd_val(pmd.pud.pgd);
9997@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9998
9999 extern pteval_t __supported_pte_mask;
10000 extern void set_nx(void);
10001+
10002+#ifdef CONFIG_X86_32
10003+#ifdef CONFIG_X86_PAE
10004 extern int nx_enabled;
10005+#else
10006+#define nx_enabled (0)
10007+#endif
10008+#else
10009+#define nx_enabled (1)
10010+#endif
10011
10012 #define pgprot_writecombine pgprot_writecombine
10013 extern pgprot_t pgprot_writecombine(pgprot_t prot);
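
The pgtable_types.h hunk re-routes _PAGE_NX to a software bit when the hardware has none; either way the surrounding helpers remain plain set/clear/test operations on a 64-bit page-table entry value, roughly as below (names invented, bit 63 used as in the PAE/64-bit case).

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define DEMO_PAGE_BIT_NX   63
#define DEMO_PAGE_NX       ((pteval_t)1 << DEMO_PAGE_BIT_NX)

static pteval_t pte_set_flags(pteval_t pte, pteval_t set)     { return pte | set; }
static pteval_t pte_clear_flags(pteval_t pte, pteval_t clear) { return pte & ~clear; }

int main(void)
{
        pteval_t pte = 0x063;                     /* PRESENT+RW+DIRTY+ACCESSED */

        pte = pte_set_flags(pte, DEMO_PAGE_NX);   /* exprotect: forbid execution */
        printf("NX set:   %d\n", !!(pte & DEMO_PAGE_NX));

        pte = pte_clear_flags(pte, DEMO_PAGE_NX); /* mkexec: allow execution again */
        printf("NX clear: %d\n", !!(pte & DEMO_PAGE_NX));
        return 0;
}
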
10014diff -urNp linux-2.6.32.46/arch/x86/include/asm/processor.h linux-2.6.32.46/arch/x86/include/asm/processor.h
10015--- linux-2.6.32.46/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
10016+++ linux-2.6.32.46/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
10017@@ -272,7 +272,7 @@ struct tss_struct {
10018
10019 } ____cacheline_aligned;
10020
10021-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
10022+extern struct tss_struct init_tss[NR_CPUS];
10023
10024 /*
10025 * Save the original ist values for checking stack pointers during debugging
10026@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
10027 */
10028 #define TASK_SIZE PAGE_OFFSET
10029 #define TASK_SIZE_MAX TASK_SIZE
10030+
10031+#ifdef CONFIG_PAX_SEGMEXEC
10032+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
10033+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
10034+#else
10035 #define STACK_TOP TASK_SIZE
10036-#define STACK_TOP_MAX STACK_TOP
10037+#endif
10038+
10039+#define STACK_TOP_MAX TASK_SIZE
10040
10041 #define INIT_THREAD { \
10042- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10043+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10044 .vm86_info = NULL, \
10045 .sysenter_cs = __KERNEL_CS, \
10046 .io_bitmap_ptr = NULL, \
10047@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
10048 */
10049 #define INIT_TSS { \
10050 .x86_tss = { \
10051- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10052+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10053 .ss0 = __KERNEL_DS, \
10054 .ss1 = __KERNEL_CS, \
10055 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10056@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
10057 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10058
10059 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10060-#define KSTK_TOP(info) \
10061-({ \
10062- unsigned long *__ptr = (unsigned long *)(info); \
10063- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10064-})
10065+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10066
10067 /*
10068 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10069@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
10070 #define task_pt_regs(task) \
10071 ({ \
10072 struct pt_regs *__regs__; \
10073- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10074+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10075 __regs__ - 1; \
10076 })
10077
10078@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
10079 /*
10080 * User space process size. 47bits minus one guard page.
10081 */
10082-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10083+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10084
10085 /* This decides where the kernel will search for a free chunk of vm
10086 * space during mmap's.
10087 */
10088 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10089- 0xc0000000 : 0xFFFFe000)
10090+ 0xc0000000 : 0xFFFFf000)
10091
10092 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10093 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10094@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
10095 #define STACK_TOP_MAX TASK_SIZE_MAX
10096
10097 #define INIT_THREAD { \
10098- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10099+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10100 }
10101
10102 #define INIT_TSS { \
10103- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10104+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10105 }
10106
10107 /*
10108@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
10109 */
10110 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10111
10112+#ifdef CONFIG_PAX_SEGMEXEC
10113+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10114+#endif
10115+
10116 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10117
10118 /* Get/set a process' ability to use the timestamp counter instruction */
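
The SEGMEXEC defines added to processor.h halve the user address space and re-derive the mmap base from the halved size. A quick arithmetic check with placeholder values for a 3G/1G i386 split:

#include <stdio.h>

#define DEMO_PAGE_SIZE      4096ul
#define DEMO_PAGE_ALIGN(x)  (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

#define DEMO_TASK_SIZE            0xc0000000ul        /* PAGE_OFFSET, placeholder */
#define DEMO_SEGMEXEC_TASK_SIZE   (DEMO_TASK_SIZE / 2)

int main(void)
{
        printf("TASK_SIZE                   = %#lx\n", DEMO_TASK_SIZE);
        printf("SEGMEXEC_TASK_SIZE          = %#lx\n", DEMO_SEGMEXEC_TASK_SIZE);
        printf("TASK_UNMAPPED_BASE          = %#lx\n",
               DEMO_PAGE_ALIGN(DEMO_TASK_SIZE / 3));
        printf("SEGMEXEC_TASK_UNMAPPED_BASE = %#lx\n",
               DEMO_PAGE_ALIGN(DEMO_SEGMEXEC_TASK_SIZE / 3));
        return 0;
}
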
10119diff -urNp linux-2.6.32.46/arch/x86/include/asm/ptrace.h linux-2.6.32.46/arch/x86/include/asm/ptrace.h
10120--- linux-2.6.32.46/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
10121+++ linux-2.6.32.46/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
10122@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10123 }
10124
10125 /*
10126- * user_mode_vm(regs) determines whether a register set came from user mode.
10127+ * user_mode(regs) determines whether a register set came from user mode.
10128 * This is true if V8086 mode was enabled OR if the register set was from
10129 * protected mode with RPL-3 CS value. This tricky test checks that with
10130 * one comparison. Many places in the kernel can bypass this full check
10131- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10132+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10133+ * be used.
10134 */
10135-static inline int user_mode(struct pt_regs *regs)
10136+static inline int user_mode_novm(struct pt_regs *regs)
10137 {
10138 #ifdef CONFIG_X86_32
10139 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10140 #else
10141- return !!(regs->cs & 3);
10142+ return !!(regs->cs & SEGMENT_RPL_MASK);
10143 #endif
10144 }
10145
10146-static inline int user_mode_vm(struct pt_regs *regs)
10147+static inline int user_mode(struct pt_regs *regs)
10148 {
10149 #ifdef CONFIG_X86_32
10150 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10151 USER_RPL;
10152 #else
10153- return user_mode(regs);
10154+ return user_mode_novm(regs);
10155 #endif
10156 }
10157
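
The renamed user_mode()/user_mode_novm() helpers keep the single-comparison trick described in the comment: OR-ing the EFLAGS VM bit into the CS RPL lets one >= test cover both "RPL 3" and "V8086 mode". A small demonstration with the usual x86 constants:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1ul << 17)   /* EFLAGS.VM */

static int demo_user_mode(unsigned long cs, unsigned long flags)
{
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("kernel CS, no VM : %d\n", demo_user_mode(0x10, 0));           /* 0 */
        printf("user CS,   no VM : %d\n", demo_user_mode(0x73, 0));           /* 1 */
        printf("kernel CS, VM86  : %d\n", demo_user_mode(0x10, X86_VM_MASK)); /* 1 */
        return 0;
}
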
10158diff -urNp linux-2.6.32.46/arch/x86/include/asm/reboot.h linux-2.6.32.46/arch/x86/include/asm/reboot.h
10159--- linux-2.6.32.46/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10160+++ linux-2.6.32.46/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10161@@ -6,19 +6,19 @@
10162 struct pt_regs;
10163
10164 struct machine_ops {
10165- void (*restart)(char *cmd);
10166- void (*halt)(void);
10167- void (*power_off)(void);
10168+ void (* __noreturn restart)(char *cmd);
10169+ void (* __noreturn halt)(void);
10170+ void (* __noreturn power_off)(void);
10171 void (*shutdown)(void);
10172 void (*crash_shutdown)(struct pt_regs *);
10173- void (*emergency_restart)(void);
10174-};
10175+ void (* __noreturn emergency_restart)(void);
10176+} __no_const;
10177
10178 extern struct machine_ops machine_ops;
10179
10180 void native_machine_crash_shutdown(struct pt_regs *regs);
10181 void native_machine_shutdown(void);
10182-void machine_real_restart(const unsigned char *code, int length);
10183+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10184
10185 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10186 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10187diff -urNp linux-2.6.32.46/arch/x86/include/asm/rwsem.h linux-2.6.32.46/arch/x86/include/asm/rwsem.h
10188--- linux-2.6.32.46/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10189+++ linux-2.6.32.46/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10190@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10191 {
10192 asm volatile("# beginning down_read\n\t"
10193 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10194+
10195+#ifdef CONFIG_PAX_REFCOUNT
10196+ "jno 0f\n"
10197+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10198+ "int $4\n0:\n"
10199+ _ASM_EXTABLE(0b, 0b)
10200+#endif
10201+
10202 /* adds 0x00000001, returns the old value */
10203 " jns 1f\n"
10204 " call call_rwsem_down_read_failed\n"
10205@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10206 "1:\n\t"
10207 " mov %1,%2\n\t"
10208 " add %3,%2\n\t"
10209+
10210+#ifdef CONFIG_PAX_REFCOUNT
10211+ "jno 0f\n"
10212+ "sub %3,%2\n"
10213+ "int $4\n0:\n"
10214+ _ASM_EXTABLE(0b, 0b)
10215+#endif
10216+
10217 " jle 2f\n\t"
10218 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10219 " jnz 1b\n\t"
10220@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10221 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10222 asm volatile("# beginning down_write\n\t"
10223 LOCK_PREFIX " xadd %1,(%2)\n\t"
10224+
10225+#ifdef CONFIG_PAX_REFCOUNT
10226+ "jno 0f\n"
10227+ "mov %1,(%2)\n"
10228+ "int $4\n0:\n"
10229+ _ASM_EXTABLE(0b, 0b)
10230+#endif
10231+
10232 /* subtract 0x0000ffff, returns the old value */
10233 " test %1,%1\n\t"
10234 /* was the count 0 before? */
10235@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10236 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10237 asm volatile("# beginning __up_read\n\t"
10238 LOCK_PREFIX " xadd %1,(%2)\n\t"
10239+
10240+#ifdef CONFIG_PAX_REFCOUNT
10241+ "jno 0f\n"
10242+ "mov %1,(%2)\n"
10243+ "int $4\n0:\n"
10244+ _ASM_EXTABLE(0b, 0b)
10245+#endif
10246+
10247 /* subtracts 1, returns the old value */
10248 " jns 1f\n\t"
10249 " call call_rwsem_wake\n"
10250@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10251 rwsem_count_t tmp;
10252 asm volatile("# beginning __up_write\n\t"
10253 LOCK_PREFIX " xadd %1,(%2)\n\t"
10254+
10255+#ifdef CONFIG_PAX_REFCOUNT
10256+ "jno 0f\n"
10257+ "mov %1,(%2)\n"
10258+ "int $4\n0:\n"
10259+ _ASM_EXTABLE(0b, 0b)
10260+#endif
10261+
10262 /* tries to transition
10263 0xffff0001 -> 0x00000000 */
10264 " jz 1f\n"
10265@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10266 {
10267 asm volatile("# beginning __downgrade_write\n\t"
10268 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10269+
10270+#ifdef CONFIG_PAX_REFCOUNT
10271+ "jno 0f\n"
10272+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10273+ "int $4\n0:\n"
10274+ _ASM_EXTABLE(0b, 0b)
10275+#endif
10276+
10277 /*
10278 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10279 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10280@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10281 static inline void rwsem_atomic_add(rwsem_count_t delta,
10282 struct rw_semaphore *sem)
10283 {
10284- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10285+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10286+
10287+#ifdef CONFIG_PAX_REFCOUNT
10288+ "jno 0f\n"
10289+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10290+ "int $4\n0:\n"
10291+ _ASM_EXTABLE(0b, 0b)
10292+#endif
10293+
10294 : "+m" (sem->count)
10295 : "er" (delta));
10296 }
10297@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10298 {
10299 rwsem_count_t tmp = delta;
10300
10301- asm volatile(LOCK_PREFIX "xadd %0,%1"
10302+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10303+
10304+#ifdef CONFIG_PAX_REFCOUNT
10305+ "jno 0f\n"
10306+ "mov %0,%1\n"
10307+ "int $4\n0:\n"
10308+ _ASM_EXTABLE(0b, 0b)
10309+#endif
10310+
10311 : "+r" (tmp), "+m" (sem->count)
10312 : : "memory");
10313
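
All of the REFCOUNT hunks above follow one pattern: perform the locked arithmetic, and if the signed result overflowed (the jno branch not taken), undo it and raise int $4 instead of letting the counter wrap. The portable stand-in below conveys the same policy, except it refuses the update up front rather than doing and undoing it; the names are invented.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int refcount_add(int *count, int delta)
{
        int newval;

        if (__builtin_add_overflow(*count, delta, &newval)) {
                /* overflow: leave the counter untouched and trap, instead of
                 * wrapping to a small value that could trigger a premature free */
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        *count = newval;
        return newval;
}

int main(void)
{
        int count = INT_MAX - 1;

        printf("count = %d\n", refcount_add(&count, 1));  /* fine: INT_MAX */
        refcount_add(&count, 1);                          /* overflows: aborts */
        return 0;
}
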
10314diff -urNp linux-2.6.32.46/arch/x86/include/asm/segment.h linux-2.6.32.46/arch/x86/include/asm/segment.h
10315--- linux-2.6.32.46/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10316+++ linux-2.6.32.46/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10317@@ -62,8 +62,8 @@
10318 * 26 - ESPFIX small SS
10319 * 27 - per-cpu [ offset to per-cpu data area ]
10320 * 28 - stack_canary-20 [ for stack protector ]
10321- * 29 - unused
10322- * 30 - unused
10323+ * 29 - PCI BIOS CS
10324+ * 30 - PCI BIOS DS
10325 * 31 - TSS for double fault handler
10326 */
10327 #define GDT_ENTRY_TLS_MIN 6
10328@@ -77,6 +77,8 @@
10329
10330 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10331
10332+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10333+
10334 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10335
10336 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10337@@ -88,7 +90,7 @@
10338 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10339 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10340
10341-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10342+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10343 #ifdef CONFIG_SMP
10344 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10345 #else
10346@@ -102,6 +104,12 @@
10347 #define __KERNEL_STACK_CANARY 0
10348 #endif
10349
10350+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10351+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10352+
10353+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10354+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10355+
10356 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10357
10358 /*
10359@@ -139,7 +147,7 @@
10360 */
10361
10362 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10363-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10364+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10365
10366
10367 #else
10368@@ -163,6 +171,8 @@
10369 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10370 #define __USER32_DS __USER_DS
10371
10372+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10373+
10374 #define GDT_ENTRY_TSS 8 /* needs two entries */
10375 #define GDT_ENTRY_LDT 10 /* needs two entries */
10376 #define GDT_ENTRY_TLS_MIN 12
10377@@ -183,6 +193,7 @@
10378 #endif
10379
10380 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10381+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10382 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10383 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10384 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
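
The new selector macros in segment.h are derived the usual way: GDT index times 8, with the low two bits carrying the requested privilege level. The sketch below assumes the customary i386 value of 12 for GDT_ENTRY_KERNEL_BASE, which the hunk itself does not show.

#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE        12   /* assumed, not part of the hunk */
#define GDT_ENTRY_KERNEXEC_KERNEL_CS 4
#define GDT_ENTRY_PCIBIOS_CS         (GDT_ENTRY_KERNEL_BASE + 17)

#define SELECTOR(index, rpl) (((index) << 3) | (rpl))

int main(void)
{
        printf("__KERNEXEC_KERNEL_CS = 0x%02x\n", SELECTOR(GDT_ENTRY_KERNEXEC_KERNEL_CS, 0));
        printf("__PCIBIOS_CS         = 0x%02x\n", SELECTOR(GDT_ENTRY_PCIBIOS_CS, 0));
        return 0;
}
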
10385diff -urNp linux-2.6.32.46/arch/x86/include/asm/smp.h linux-2.6.32.46/arch/x86/include/asm/smp.h
10386--- linux-2.6.32.46/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10387+++ linux-2.6.32.46/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10388@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10389 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10390 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10391 DECLARE_PER_CPU(u16, cpu_llc_id);
10392-DECLARE_PER_CPU(int, cpu_number);
10393+DECLARE_PER_CPU(unsigned int, cpu_number);
10394
10395 static inline struct cpumask *cpu_sibling_mask(int cpu)
10396 {
10397@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10398 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10399
10400 /* Static state in head.S used to set up a CPU */
10401-extern struct {
10402- void *sp;
10403- unsigned short ss;
10404-} stack_start;
10405+extern unsigned long stack_start; /* Initial stack pointer address */
10406
10407 struct smp_ops {
10408 void (*smp_prepare_boot_cpu)(void);
10409@@ -60,7 +57,7 @@ struct smp_ops {
10410
10411 void (*send_call_func_ipi)(const struct cpumask *mask);
10412 void (*send_call_func_single_ipi)(int cpu);
10413-};
10414+} __no_const;
10415
10416 /* Globals due to paravirt */
10417 extern void set_cpu_sibling_map(int cpu);
10418@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10419 extern int safe_smp_processor_id(void);
10420
10421 #elif defined(CONFIG_X86_64_SMP)
10422-#define raw_smp_processor_id() (percpu_read(cpu_number))
10423-
10424-#define stack_smp_processor_id() \
10425-({ \
10426- struct thread_info *ti; \
10427- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10428- ti->cpu; \
10429-})
10430+#define raw_smp_processor_id() (percpu_read(cpu_number))
10431+#define stack_smp_processor_id() raw_smp_processor_id()
10432 #define safe_smp_processor_id() smp_processor_id()
10433
10434 #endif
10435diff -urNp linux-2.6.32.46/arch/x86/include/asm/spinlock.h linux-2.6.32.46/arch/x86/include/asm/spinlock.h
10436--- linux-2.6.32.46/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10437+++ linux-2.6.32.46/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10438@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10439 static inline void __raw_read_lock(raw_rwlock_t *rw)
10440 {
10441 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10442+
10443+#ifdef CONFIG_PAX_REFCOUNT
10444+ "jno 0f\n"
10445+ LOCK_PREFIX " addl $1,(%0)\n"
10446+ "int $4\n0:\n"
10447+ _ASM_EXTABLE(0b, 0b)
10448+#endif
10449+
10450 "jns 1f\n"
10451 "call __read_lock_failed\n\t"
10452 "1:\n"
10453@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10454 static inline void __raw_write_lock(raw_rwlock_t *rw)
10455 {
10456 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10457+
10458+#ifdef CONFIG_PAX_REFCOUNT
10459+ "jno 0f\n"
10460+ LOCK_PREFIX " addl %1,(%0)\n"
10461+ "int $4\n0:\n"
10462+ _ASM_EXTABLE(0b, 0b)
10463+#endif
10464+
10465 "jz 1f\n"
10466 "call __write_lock_failed\n\t"
10467 "1:\n"
10468@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10469
10470 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10471 {
10472- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10473+ asm volatile(LOCK_PREFIX "incl %0\n"
10474+
10475+#ifdef CONFIG_PAX_REFCOUNT
10476+ "jno 0f\n"
10477+ LOCK_PREFIX "decl %0\n"
10478+ "int $4\n0:\n"
10479+ _ASM_EXTABLE(0b, 0b)
10480+#endif
10481+
10482+ :"+m" (rw->lock) : : "memory");
10483 }
10484
10485 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10486 {
10487- asm volatile(LOCK_PREFIX "addl %1, %0"
10488+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10489+
10490+#ifdef CONFIG_PAX_REFCOUNT
10491+ "jno 0f\n"
10492+ LOCK_PREFIX "subl %1, %0\n"
10493+ "int $4\n0:\n"
10494+ _ASM_EXTABLE(0b, 0b)
10495+#endif
10496+
10497 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10498 }
10499
10500diff -urNp linux-2.6.32.46/arch/x86/include/asm/stackprotector.h linux-2.6.32.46/arch/x86/include/asm/stackprotector.h
10501--- linux-2.6.32.46/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10502+++ linux-2.6.32.46/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10503@@ -48,7 +48,7 @@
10504 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10505 */
10506 #define GDT_STACK_CANARY_INIT \
10507- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10508+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10509
10510 /*
10511 * Initialize the stackprotector canary value.
10512@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10513
10514 static inline void load_stack_canary_segment(void)
10515 {
10516-#ifdef CONFIG_X86_32
10517+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10518 asm volatile ("mov %0, %%gs" : : "r" (0));
10519 #endif
10520 }
10521diff -urNp linux-2.6.32.46/arch/x86/include/asm/system.h linux-2.6.32.46/arch/x86/include/asm/system.h
10522--- linux-2.6.32.46/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10523+++ linux-2.6.32.46/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10524@@ -132,7 +132,7 @@ do { \
10525 "thread_return:\n\t" \
10526 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10527 __switch_canary \
10528- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10529+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10530 "movq %%rax,%%rdi\n\t" \
10531 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10532 "jnz ret_from_fork\n\t" \
10533@@ -143,7 +143,7 @@ do { \
10534 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10535 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10536 [_tif_fork] "i" (_TIF_FORK), \
10537- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10538+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10539 [current_task] "m" (per_cpu_var(current_task)) \
10540 __switch_canary_iparam \
10541 : "memory", "cc" __EXTRA_CLOBBER)
10542@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10543 {
10544 unsigned long __limit;
10545 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10546- return __limit + 1;
10547+ return __limit;
10548 }
10549
10550 static inline void native_clts(void)
10551@@ -340,12 +340,12 @@ void enable_hlt(void);
10552
10553 void cpu_idle_wait(void);
10554
10555-extern unsigned long arch_align_stack(unsigned long sp);
10556+#define arch_align_stack(x) ((x) & ~0xfUL)
10557 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10558
10559 void default_idle(void);
10560
10561-void stop_this_cpu(void *dummy);
10562+void stop_this_cpu(void *dummy) __noreturn;
10563
10564 /*
10565 * Force strict CPU ordering.
10566diff -urNp linux-2.6.32.46/arch/x86/include/asm/thread_info.h linux-2.6.32.46/arch/x86/include/asm/thread_info.h
10567--- linux-2.6.32.46/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10568+++ linux-2.6.32.46/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10569@@ -10,6 +10,7 @@
10570 #include <linux/compiler.h>
10571 #include <asm/page.h>
10572 #include <asm/types.h>
10573+#include <asm/percpu.h>
10574
10575 /*
10576 * low level task data that entry.S needs immediate access to
10577@@ -24,7 +25,6 @@ struct exec_domain;
10578 #include <asm/atomic.h>
10579
10580 struct thread_info {
10581- struct task_struct *task; /* main task structure */
10582 struct exec_domain *exec_domain; /* execution domain */
10583 __u32 flags; /* low level flags */
10584 __u32 status; /* thread synchronous flags */
10585@@ -34,18 +34,12 @@ struct thread_info {
10586 mm_segment_t addr_limit;
10587 struct restart_block restart_block;
10588 void __user *sysenter_return;
10589-#ifdef CONFIG_X86_32
10590- unsigned long previous_esp; /* ESP of the previous stack in
10591- case of nested (IRQ) stacks
10592- */
10593- __u8 supervisor_stack[0];
10594-#endif
10595+ unsigned long lowest_stack;
10596 int uaccess_err;
10597 };
10598
10599-#define INIT_THREAD_INFO(tsk) \
10600+#define INIT_THREAD_INFO \
10601 { \
10602- .task = &tsk, \
10603 .exec_domain = &default_exec_domain, \
10604 .flags = 0, \
10605 .cpu = 0, \
10606@@ -56,7 +50,7 @@ struct thread_info {
10607 }, \
10608 }
10609
10610-#define init_thread_info (init_thread_union.thread_info)
10611+#define init_thread_info (init_thread_union.stack)
10612 #define init_stack (init_thread_union.stack)
10613
10614 #else /* !__ASSEMBLY__ */
10615@@ -163,6 +157,23 @@ struct thread_info {
10616 #define alloc_thread_info(tsk) \
10617 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10618
10619+#ifdef __ASSEMBLY__
10620+/* how to get the thread information struct from ASM */
10621+#define GET_THREAD_INFO(reg) \
10622+ mov PER_CPU_VAR(current_tinfo), reg
10623+
10624+/* use this one if reg already contains %esp */
10625+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10626+#else
10627+/* how to get the thread information struct from C */
10628+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10629+
10630+static __always_inline struct thread_info *current_thread_info(void)
10631+{
10632+ return percpu_read_stable(current_tinfo);
10633+}
10634+#endif
10635+
10636 #ifdef CONFIG_X86_32
10637
10638 #define STACK_WARN (THREAD_SIZE/8)
10639@@ -173,35 +184,13 @@ struct thread_info {
10640 */
10641 #ifndef __ASSEMBLY__
10642
10643-
10644 /* how to get the current stack pointer from C */
10645 register unsigned long current_stack_pointer asm("esp") __used;
10646
10647-/* how to get the thread information struct from C */
10648-static inline struct thread_info *current_thread_info(void)
10649-{
10650- return (struct thread_info *)
10651- (current_stack_pointer & ~(THREAD_SIZE - 1));
10652-}
10653-
10654-#else /* !__ASSEMBLY__ */
10655-
10656-/* how to get the thread information struct from ASM */
10657-#define GET_THREAD_INFO(reg) \
10658- movl $-THREAD_SIZE, reg; \
10659- andl %esp, reg
10660-
10661-/* use this one if reg already contains %esp */
10662-#define GET_THREAD_INFO_WITH_ESP(reg) \
10663- andl $-THREAD_SIZE, reg
10664-
10665 #endif
10666
10667 #else /* X86_32 */
10668
10669-#include <asm/percpu.h>
10670-#define KERNEL_STACK_OFFSET (5*8)
10671-
10672 /*
10673 * macros/functions for gaining access to the thread information structure
10674 * preempt_count needs to be 1 initially, until the scheduler is functional.
10675@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10676 #ifndef __ASSEMBLY__
10677 DECLARE_PER_CPU(unsigned long, kernel_stack);
10678
10679-static inline struct thread_info *current_thread_info(void)
10680-{
10681- struct thread_info *ti;
10682- ti = (void *)(percpu_read_stable(kernel_stack) +
10683- KERNEL_STACK_OFFSET - THREAD_SIZE);
10684- return ti;
10685-}
10686-
10687-#else /* !__ASSEMBLY__ */
10688-
10689-/* how to get the thread information struct from ASM */
10690-#define GET_THREAD_INFO(reg) \
10691- movq PER_CPU_VAR(kernel_stack),reg ; \
10692- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10693-
10694+/* how to get the current stack pointer from C */
10695+register unsigned long current_stack_pointer asm("rsp") __used;
10696 #endif
10697
10698 #endif /* !X86_32 */
10699@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10700 extern void free_thread_info(struct thread_info *ti);
10701 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10702 #define arch_task_cache_init arch_task_cache_init
10703+
10704+#define __HAVE_THREAD_FUNCTIONS
10705+#define task_thread_info(task) (&(task)->tinfo)
10706+#define task_stack_page(task) ((task)->stack)
10707+#define setup_thread_stack(p, org) do {} while (0)
10708+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10709+
10710+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10711+extern struct task_struct *alloc_task_struct(void);
10712+extern void free_task_struct(struct task_struct *);
10713+
10714 #endif
10715 #endif /* _ASM_X86_THREAD_INFO_H */
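
For contrast with the per-cpu current_tinfo lookup the patch introduces, here is a userspace model of the stack-masking scheme it removes: when thread_info sits at the bottom of a THREAD_SIZE-aligned stack, rounding any stack address down to that alignment finds it. The patch drops this so thread_info can live off the stack entirely.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192u

struct demo_thread_info { int cpu; unsigned flags; };

int main(void)
{
        /* stack and thread_info share one THREAD_SIZE-aligned allocation */
        void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!stack)
                return 1;

        struct demo_thread_info *ti = stack;
        ti->cpu = 1;

        /* pretend 'sp' points somewhere in the middle of that stack */
        unsigned long sp = (unsigned long)stack + THREAD_SIZE - 200;

        struct demo_thread_info *found =
                (struct demo_thread_info *)(sp & ~(unsigned long)(THREAD_SIZE - 1));
        assert(found == ti);
        printf("recovered thread_info, cpu=%d\n", found->cpu);

        free(stack);
        return 0;
}
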
10716diff -urNp linux-2.6.32.46/arch/x86/include/asm/uaccess_32.h linux-2.6.32.46/arch/x86/include/asm/uaccess_32.h
10717--- linux-2.6.32.46/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10718+++ linux-2.6.32.46/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10719@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10720 static __always_inline unsigned long __must_check
10721 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10722 {
10723+ pax_track_stack();
10724+
10725+ if ((long)n < 0)
10726+ return n;
10727+
10728 if (__builtin_constant_p(n)) {
10729 unsigned long ret;
10730
10731@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10732 return ret;
10733 }
10734 }
10735+ if (!__builtin_constant_p(n))
10736+ check_object_size(from, n, true);
10737 return __copy_to_user_ll(to, from, n);
10738 }
10739
10740@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10741 __copy_to_user(void __user *to, const void *from, unsigned long n)
10742 {
10743 might_fault();
10744+
10745 return __copy_to_user_inatomic(to, from, n);
10746 }
10747
10748 static __always_inline unsigned long
10749 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10750 {
10751+ if ((long)n < 0)
10752+ return n;
10753+
10754 /* Avoid zeroing the tail if the copy fails..
10755 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10756 * but as the zeroing behaviour is only significant when n is not
10757@@ -138,6 +149,12 @@ static __always_inline unsigned long
10758 __copy_from_user(void *to, const void __user *from, unsigned long n)
10759 {
10760 might_fault();
10761+
10762+ pax_track_stack();
10763+
10764+ if ((long)n < 0)
10765+ return n;
10766+
10767 if (__builtin_constant_p(n)) {
10768 unsigned long ret;
10769
10770@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10771 return ret;
10772 }
10773 }
10774+ if (!__builtin_constant_p(n))
10775+ check_object_size(to, n, false);
10776 return __copy_from_user_ll(to, from, n);
10777 }
10778
10779@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10780 const void __user *from, unsigned long n)
10781 {
10782 might_fault();
10783+
10784+ if ((long)n < 0)
10785+ return n;
10786+
10787 if (__builtin_constant_p(n)) {
10788 unsigned long ret;
10789
10790@@ -182,14 +205,62 @@ static __always_inline unsigned long
10791 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10792 unsigned long n)
10793 {
10794- return __copy_from_user_ll_nocache_nozero(to, from, n);
10795+ if ((long)n < 0)
10796+ return n;
10797+
10798+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10799+}
10800+
10801+/**
10802+ * copy_to_user: - Copy a block of data into user space.
10803+ * @to: Destination address, in user space.
10804+ * @from: Source address, in kernel space.
10805+ * @n: Number of bytes to copy.
10806+ *
10807+ * Context: User context only. This function may sleep.
10808+ *
10809+ * Copy data from kernel space to user space.
10810+ *
10811+ * Returns number of bytes that could not be copied.
10812+ * On success, this will be zero.
10813+ */
10814+static __always_inline unsigned long __must_check
10815+copy_to_user(void __user *to, const void *from, unsigned long n)
10816+{
10817+ if (access_ok(VERIFY_WRITE, to, n))
10818+ n = __copy_to_user(to, from, n);
10819+ return n;
10820+}
10821+
10822+/**
10823+ * copy_from_user: - Copy a block of data from user space.
10824+ * @to: Destination address, in kernel space.
10825+ * @from: Source address, in user space.
10826+ * @n: Number of bytes to copy.
10827+ *
10828+ * Context: User context only. This function may sleep.
10829+ *
10830+ * Copy data from user space to kernel space.
10831+ *
10832+ * Returns number of bytes that could not be copied.
10833+ * On success, this will be zero.
10834+ *
10835+ * If some data could not be copied, this function will pad the copied
10836+ * data to the requested size using zero bytes.
10837+ */
10838+static __always_inline unsigned long __must_check
10839+copy_from_user(void *to, const void __user *from, unsigned long n)
10840+{
10841+ if (access_ok(VERIFY_READ, from, n))
10842+ n = __copy_from_user(to, from, n);
10843+ else if ((long)n > 0) {
10844+ if (!__builtin_constant_p(n))
10845+ check_object_size(to, n, false);
10846+ memset(to, 0, n);
10847+ }
10848+ return n;
10849 }
10850
10851-unsigned long __must_check copy_to_user(void __user *to,
10852- const void *from, unsigned long n);
10853-unsigned long __must_check copy_from_user(void *to,
10854- const void __user *from,
10855- unsigned long n);
10856 long __must_check strncpy_from_user(char *dst, const char __user *src,
10857 long count);
10858 long __must_check __strncpy_from_user(char *dst,
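
A userspace mock (invented names) of the copy_from_user() shape the uaccess_32.h hunk inlines: reject lengths that have gone negative through earlier arithmetic bugs, verify the source range, and zero-fill the destination when the copy cannot be done so stale kernel data is not handed back.

#include <stdio.h>
#include <string.h>

/* stand-in for access_ok(): accept only a fixed "user" buffer */
static char user_buf[64] = "data from user space";

static int demo_access_ok(const void *from, unsigned long n)
{
        return from == user_buf && n <= sizeof(user_buf);
}

static unsigned long demo_copy_from_user(void *to, const void *from, unsigned long n)
{
        if ((long)n < 0)                  /* huge value: a length underflow upstream */
                return n;
        if (demo_access_ok(from, n)) {
                memcpy(to, from, n);      /* the real code uses a faulting copy */
                return 0;                 /* 0 bytes left uncopied */
        }
        memset(to, 0, n);                 /* failed: don't leak old buffer contents */
        return n;
}

int main(void)
{
        char kbuf[64];

        printf("good copy, left over: %lu\n",
               demo_copy_from_user(kbuf, user_buf, 21));
        printf("bad range, left over: %lu (buffer zeroed: %d)\n",
               demo_copy_from_user(kbuf, user_buf + 1, 64), kbuf[0] == 0);
        return 0;
}
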
10859diff -urNp linux-2.6.32.46/arch/x86/include/asm/uaccess_64.h linux-2.6.32.46/arch/x86/include/asm/uaccess_64.h
10860--- linux-2.6.32.46/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10861+++ linux-2.6.32.46/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10862@@ -9,6 +9,9 @@
10863 #include <linux/prefetch.h>
10864 #include <linux/lockdep.h>
10865 #include <asm/page.h>
10866+#include <asm/pgtable.h>
10867+
10868+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10869
10870 /*
10871 * Copy To/From Userspace
10872@@ -19,113 +22,203 @@ __must_check unsigned long
10873 copy_user_generic(void *to, const void *from, unsigned len);
10874
10875 __must_check unsigned long
10876-copy_to_user(void __user *to, const void *from, unsigned len);
10877-__must_check unsigned long
10878-copy_from_user(void *to, const void __user *from, unsigned len);
10879-__must_check unsigned long
10880 copy_in_user(void __user *to, const void __user *from, unsigned len);
10881
10882 static __always_inline __must_check
10883-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10884+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10885 {
10886- int ret = 0;
10887+ unsigned ret = 0;
10888
10889 might_fault();
10890- if (!__builtin_constant_p(size))
10891- return copy_user_generic(dst, (__force void *)src, size);
10892+
10893+ if ((int)size < 0)
10894+ return size;
10895+
10896+#ifdef CONFIG_PAX_MEMORY_UDEREF
10897+ if (!__access_ok(VERIFY_READ, src, size))
10898+ return size;
10899+#endif
10900+
10901+ if (!__builtin_constant_p(size)) {
10902+ check_object_size(dst, size, false);
10903+
10904+#ifdef CONFIG_PAX_MEMORY_UDEREF
10905+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10906+ src += PAX_USER_SHADOW_BASE;
10907+#endif
10908+
10909+ return copy_user_generic(dst, (__force const void *)src, size);
10910+ }
10911 switch (size) {
10912- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10913+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10914 ret, "b", "b", "=q", 1);
10915 return ret;
10916- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10917+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10918 ret, "w", "w", "=r", 2);
10919 return ret;
10920- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10921+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10922 ret, "l", "k", "=r", 4);
10923 return ret;
10924- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10925+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10926 ret, "q", "", "=r", 8);
10927 return ret;
10928 case 10:
10929- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10930+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10931 ret, "q", "", "=r", 10);
10932 if (unlikely(ret))
10933 return ret;
10934 __get_user_asm(*(u16 *)(8 + (char *)dst),
10935- (u16 __user *)(8 + (char __user *)src),
10936+ (const u16 __user *)(8 + (const char __user *)src),
10937 ret, "w", "w", "=r", 2);
10938 return ret;
10939 case 16:
10940- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10941+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10942 ret, "q", "", "=r", 16);
10943 if (unlikely(ret))
10944 return ret;
10945 __get_user_asm(*(u64 *)(8 + (char *)dst),
10946- (u64 __user *)(8 + (char __user *)src),
10947+ (const u64 __user *)(8 + (const char __user *)src),
10948 ret, "q", "", "=r", 8);
10949 return ret;
10950 default:
10951- return copy_user_generic(dst, (__force void *)src, size);
10952+
10953+#ifdef CONFIG_PAX_MEMORY_UDEREF
10954+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10955+ src += PAX_USER_SHADOW_BASE;
10956+#endif
10957+
10958+ return copy_user_generic(dst, (__force const void *)src, size);
10959 }
10960 }
10961
10962 static __always_inline __must_check
10963-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10964+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10965 {
10966- int ret = 0;
10967+ unsigned ret = 0;
10968
10969 might_fault();
10970- if (!__builtin_constant_p(size))
10971+
10972+ pax_track_stack();
10973+
10974+ if ((int)size < 0)
10975+ return size;
10976+
10977+#ifdef CONFIG_PAX_MEMORY_UDEREF
10978+ if (!__access_ok(VERIFY_WRITE, dst, size))
10979+ return size;
10980+#endif
10981+
10982+ if (!__builtin_constant_p(size)) {
10983+ check_object_size(src, size, true);
10984+
10985+#ifdef CONFIG_PAX_MEMORY_UDEREF
10986+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10987+ dst += PAX_USER_SHADOW_BASE;
10988+#endif
10989+
10990 return copy_user_generic((__force void *)dst, src, size);
10991+ }
10992 switch (size) {
10993- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10994+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10995 ret, "b", "b", "iq", 1);
10996 return ret;
10997- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10998+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10999 ret, "w", "w", "ir", 2);
11000 return ret;
11001- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11002+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11003 ret, "l", "k", "ir", 4);
11004 return ret;
11005- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11006+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11007 ret, "q", "", "er", 8);
11008 return ret;
11009 case 10:
11010- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11011+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11012 ret, "q", "", "er", 10);
11013 if (unlikely(ret))
11014 return ret;
11015 asm("":::"memory");
11016- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11017+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11018 ret, "w", "w", "ir", 2);
11019 return ret;
11020 case 16:
11021- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11022+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11023 ret, "q", "", "er", 16);
11024 if (unlikely(ret))
11025 return ret;
11026 asm("":::"memory");
11027- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11028+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11029 ret, "q", "", "er", 8);
11030 return ret;
11031 default:
11032+
11033+#ifdef CONFIG_PAX_MEMORY_UDEREF
11034+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11035+ dst += PAX_USER_SHADOW_BASE;
11036+#endif
11037+
11038 return copy_user_generic((__force void *)dst, src, size);
11039 }
11040 }
11041
11042 static __always_inline __must_check
11043-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11044+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11045+{
11046+ if (access_ok(VERIFY_WRITE, to, len))
11047+ len = __copy_to_user(to, from, len);
11048+ return len;
11049+}
11050+
11051+static __always_inline __must_check
11052+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11053+{
11054+ if ((int)len < 0)
11055+ return len;
11056+
11057+ if (access_ok(VERIFY_READ, from, len))
11058+ len = __copy_from_user(to, from, len);
11059+ else if ((int)len > 0) {
11060+ if (!__builtin_constant_p(len))
11061+ check_object_size(to, len, false);
11062+ memset(to, 0, len);
11063+ }
11064+ return len;
11065+}
11066+
11067+static __always_inline __must_check
11068+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11069 {
11070- int ret = 0;
11071+ unsigned ret = 0;
11072
11073 might_fault();
11074- if (!__builtin_constant_p(size))
11075+
11076+ pax_track_stack();
11077+
11078+ if ((int)size < 0)
11079+ return size;
11080+
11081+#ifdef CONFIG_PAX_MEMORY_UDEREF
11082+ if (!__access_ok(VERIFY_READ, src, size))
11083+ return size;
11084+ if (!__access_ok(VERIFY_WRITE, dst, size))
11085+ return size;
11086+#endif
11087+
11088+ if (!__builtin_constant_p(size)) {
11089+
11090+#ifdef CONFIG_PAX_MEMORY_UDEREF
11091+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11092+ src += PAX_USER_SHADOW_BASE;
11093+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11094+ dst += PAX_USER_SHADOW_BASE;
11095+#endif
11096+
11097 return copy_user_generic((__force void *)dst,
11098- (__force void *)src, size);
11099+ (__force const void *)src, size);
11100+ }
11101 switch (size) {
11102 case 1: {
11103 u8 tmp;
11104- __get_user_asm(tmp, (u8 __user *)src,
11105+ __get_user_asm(tmp, (const u8 __user *)src,
11106 ret, "b", "b", "=q", 1);
11107 if (likely(!ret))
11108 __put_user_asm(tmp, (u8 __user *)dst,
11109@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11110 }
11111 case 2: {
11112 u16 tmp;
11113- __get_user_asm(tmp, (u16 __user *)src,
11114+ __get_user_asm(tmp, (const u16 __user *)src,
11115 ret, "w", "w", "=r", 2);
11116 if (likely(!ret))
11117 __put_user_asm(tmp, (u16 __user *)dst,
11118@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11119
11120 case 4: {
11121 u32 tmp;
11122- __get_user_asm(tmp, (u32 __user *)src,
11123+ __get_user_asm(tmp, (const u32 __user *)src,
11124 ret, "l", "k", "=r", 4);
11125 if (likely(!ret))
11126 __put_user_asm(tmp, (u32 __user *)dst,
11127@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11128 }
11129 case 8: {
11130 u64 tmp;
11131- __get_user_asm(tmp, (u64 __user *)src,
11132+ __get_user_asm(tmp, (const u64 __user *)src,
11133 ret, "q", "", "=r", 8);
11134 if (likely(!ret))
11135 __put_user_asm(tmp, (u64 __user *)dst,
11136@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11137 return ret;
11138 }
11139 default:
11140+
11141+#ifdef CONFIG_PAX_MEMORY_UDEREF
11142+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11143+ src += PAX_USER_SHADOW_BASE;
11144+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11145+ dst += PAX_USER_SHADOW_BASE;
11146+#endif
11147+
11148 return copy_user_generic((__force void *)dst,
11149- (__force void *)src, size);
11150+ (__force const void *)src, size);
11151 }
11152 }
11153
11154@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11155 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11156 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11157
11158-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11159- unsigned size);
11160+static __must_check __always_inline unsigned long
11161+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11162+{
11163+ pax_track_stack();
11164+
11165+ if ((int)size < 0)
11166+ return size;
11167
11168-static __must_check __always_inline int
11169+#ifdef CONFIG_PAX_MEMORY_UDEREF
11170+ if (!__access_ok(VERIFY_READ, src, size))
11171+ return size;
11172+
11173+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11174+ src += PAX_USER_SHADOW_BASE;
11175+#endif
11176+
11177+ return copy_user_generic(dst, (__force const void *)src, size);
11178+}
11179+
11180+static __must_check __always_inline unsigned long
11181 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11182 {
11183+ if ((int)size < 0)
11184+ return size;
11185+
11186+#ifdef CONFIG_PAX_MEMORY_UDEREF
11187+ if (!__access_ok(VERIFY_WRITE, dst, size))
11188+ return size;
11189+
11190+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11191+ dst += PAX_USER_SHADOW_BASE;
11192+#endif
11193+
11194 return copy_user_generic((__force void *)dst, src, size);
11195 }
11196
11197-extern long __copy_user_nocache(void *dst, const void __user *src,
11198+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11199 unsigned size, int zerorest);
11200
11201-static inline int
11202-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11203+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11204 {
11205 might_sleep();
11206+
11207+ if ((int)size < 0)
11208+ return size;
11209+
11210+#ifdef CONFIG_PAX_MEMORY_UDEREF
11211+ if (!__access_ok(VERIFY_READ, src, size))
11212+ return size;
11213+#endif
11214+
11215 return __copy_user_nocache(dst, src, size, 1);
11216 }
11217
11218-static inline int
11219-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11220+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11221 unsigned size)
11222 {
11223+ if ((int)size < 0)
11224+ return size;
11225+
11226+#ifdef CONFIG_PAX_MEMORY_UDEREF
11227+ if (!__access_ok(VERIFY_READ, src, size))
11228+ return size;
11229+#endif
11230+
11231 return __copy_user_nocache(dst, src, size, 0);
11232 }
11233
11234-unsigned long
11235+extern unsigned long
11236 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11237
11238 #endif /* _ASM_X86_UACCESS_64_H */
11239diff -urNp linux-2.6.32.46/arch/x86/include/asm/uaccess.h linux-2.6.32.46/arch/x86/include/asm/uaccess.h
11240--- linux-2.6.32.46/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11241+++ linux-2.6.32.46/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11242@@ -8,12 +8,15 @@
11243 #include <linux/thread_info.h>
11244 #include <linux/prefetch.h>
11245 #include <linux/string.h>
11246+#include <linux/sched.h>
11247 #include <asm/asm.h>
11248 #include <asm/page.h>
11249
11250 #define VERIFY_READ 0
11251 #define VERIFY_WRITE 1
11252
11253+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11254+
11255 /*
11256 * The fs value determines whether argument validity checking should be
11257 * performed or not. If get_fs() == USER_DS, checking is performed, with
11258@@ -29,7 +32,12 @@
11259
11260 #define get_ds() (KERNEL_DS)
11261 #define get_fs() (current_thread_info()->addr_limit)
11262+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11263+void __set_fs(mm_segment_t x);
11264+void set_fs(mm_segment_t x);
11265+#else
11266 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11267+#endif
11268
11269 #define segment_eq(a, b) ((a).seg == (b).seg)
11270
11271@@ -77,7 +85,33 @@
11272 * checks that the pointer is in the user space range - after calling
11273 * this function, memory access functions may still return -EFAULT.
11274 */
11275-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11276+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11277+#define access_ok(type, addr, size) \
11278+({ \
11279+ long __size = size; \
11280+ unsigned long __addr = (unsigned long)addr; \
11281+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11282+ unsigned long __end_ao = __addr + __size - 1; \
11283+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11284+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11285+ while(__addr_ao <= __end_ao) { \
11286+ char __c_ao; \
11287+ __addr_ao += PAGE_SIZE; \
11288+ if (__size > PAGE_SIZE) \
11289+ cond_resched(); \
11290+ if (__get_user(__c_ao, (char __user *)__addr)) \
11291+ break; \
11292+ if (type != VERIFY_WRITE) { \
11293+ __addr = __addr_ao; \
11294+ continue; \
11295+ } \
11296+ if (__put_user(__c_ao, (char __user *)__addr)) \
11297+ break; \
11298+ __addr = __addr_ao; \
11299+ } \
11300+ } \
11301+ __ret_ao; \
11302+})
11303
11304 /*
11305 * The exception table consists of pairs of addresses: the first is the
11306@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11307 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11308 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11309
11310-
11311+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11312+#define __copyuser_seg "gs;"
11313+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11314+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11315+#else
11316+#define __copyuser_seg
11317+#define __COPYUSER_SET_ES
11318+#define __COPYUSER_RESTORE_ES
11319+#endif
11320
11321 #ifdef CONFIG_X86_32
11322 #define __put_user_asm_u64(x, addr, err, errret) \
11323- asm volatile("1: movl %%eax,0(%2)\n" \
11324- "2: movl %%edx,4(%2)\n" \
11325+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11326+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11327 "3:\n" \
11328 ".section .fixup,\"ax\"\n" \
11329 "4: movl %3,%0\n" \
11330@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11331 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11332
11333 #define __put_user_asm_ex_u64(x, addr) \
11334- asm volatile("1: movl %%eax,0(%1)\n" \
11335- "2: movl %%edx,4(%1)\n" \
11336+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11337+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11338 "3:\n" \
11339 _ASM_EXTABLE(1b, 2b - 1b) \
11340 _ASM_EXTABLE(2b, 3b - 2b) \
11341@@ -374,7 +416,7 @@ do { \
11342 } while (0)
11343
11344 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11345- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11346+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11347 "2:\n" \
11348 ".section .fixup,\"ax\"\n" \
11349 "3: mov %3,%0\n" \
11350@@ -382,7 +424,7 @@ do { \
11351 " jmp 2b\n" \
11352 ".previous\n" \
11353 _ASM_EXTABLE(1b, 3b) \
11354- : "=r" (err), ltype(x) \
11355+ : "=r" (err), ltype (x) \
11356 : "m" (__m(addr)), "i" (errret), "0" (err))
11357
11358 #define __get_user_size_ex(x, ptr, size) \
11359@@ -407,7 +449,7 @@ do { \
11360 } while (0)
11361
11362 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11363- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11364+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11365 "2:\n" \
11366 _ASM_EXTABLE(1b, 2b - 1b) \
11367 : ltype(x) : "m" (__m(addr)))
11368@@ -424,13 +466,24 @@ do { \
11369 int __gu_err; \
11370 unsigned long __gu_val; \
11371 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11372- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11373+ (x) = (__typeof__(*(ptr)))__gu_val; \
11374 __gu_err; \
11375 })
11376
11377 /* FIXME: this hack is definitely wrong -AK */
11378 struct __large_struct { unsigned long buf[100]; };
11379-#define __m(x) (*(struct __large_struct __user *)(x))
11380+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11381+#define ____m(x) \
11382+({ \
11383+ unsigned long ____x = (unsigned long)(x); \
11384+ if (____x < PAX_USER_SHADOW_BASE) \
11385+ ____x += PAX_USER_SHADOW_BASE; \
11386+ (void __user *)____x; \
11387+})
11388+#else
11389+#define ____m(x) (x)
11390+#endif
11391+#define __m(x) (*(struct __large_struct __user *)____m(x))
11392
11393 /*
11394 * Tell gcc we read from memory instead of writing: this is because
11395@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11396 * aliasing issues.
11397 */
11398 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11399- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11400+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11401 "2:\n" \
11402 ".section .fixup,\"ax\"\n" \
11403 "3: mov %3,%0\n" \
11404@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11405 ".previous\n" \
11406 _ASM_EXTABLE(1b, 3b) \
11407 : "=r"(err) \
11408- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11409+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11410
11411 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11412- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11413+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11414 "2:\n" \
11415 _ASM_EXTABLE(1b, 2b - 1b) \
11416 : : ltype(x), "m" (__m(addr)))
11417@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11418 * On error, the variable @x is set to zero.
11419 */
11420
11421+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11422+#define __get_user(x, ptr) get_user((x), (ptr))
11423+#else
11424 #define __get_user(x, ptr) \
11425 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11426+#endif
11427
11428 /**
11429 * __put_user: - Write a simple value into user space, with less checking.
11430@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11431 * Returns zero on success, or -EFAULT on error.
11432 */
11433
11434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11435+#define __put_user(x, ptr) put_user((x), (ptr))
11436+#else
11437 #define __put_user(x, ptr) \
11438 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11439+#endif
11440
11441 #define __get_user_unaligned __get_user
11442 #define __put_user_unaligned __put_user
11443@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11444 #define get_user_ex(x, ptr) do { \
11445 unsigned long __gue_val; \
11446 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11447- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11448+ (x) = (__typeof__(*(ptr)))__gue_val; \
11449 } while (0)
11450
11451 #ifdef CONFIG_X86_WP_WORKS_OK
11452@@ -567,6 +628,7 @@ extern struct movsl_mask {
11453
11454 #define ARCH_HAS_NOCACHE_UACCESS 1
11455
11456+#define ARCH_HAS_SORT_EXTABLE
11457 #ifdef CONFIG_X86_32
11458 # include "uaccess_32.h"
11459 #else
11460diff -urNp linux-2.6.32.46/arch/x86/include/asm/vgtod.h linux-2.6.32.46/arch/x86/include/asm/vgtod.h
11461--- linux-2.6.32.46/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11462+++ linux-2.6.32.46/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11463@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11464 int sysctl_enabled;
11465 struct timezone sys_tz;
11466 struct { /* extract of a clocksource struct */
11467+ char name[8];
11468 cycle_t (*vread)(void);
11469 cycle_t cycle_last;
11470 cycle_t mask;
11471diff -urNp linux-2.6.32.46/arch/x86/include/asm/vmi.h linux-2.6.32.46/arch/x86/include/asm/vmi.h
11472--- linux-2.6.32.46/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11473+++ linux-2.6.32.46/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11474@@ -191,6 +191,7 @@ struct vrom_header {
11475 u8 reserved[96]; /* Reserved for headers */
11476 char vmi_init[8]; /* VMI_Init jump point */
11477 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11478+ char rom_data[8048]; /* rest of the option ROM */
11479 } __attribute__((packed));
11480
11481 struct pnp_header {
11482diff -urNp linux-2.6.32.46/arch/x86/include/asm/vmi_time.h linux-2.6.32.46/arch/x86/include/asm/vmi_time.h
11483--- linux-2.6.32.46/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11484+++ linux-2.6.32.46/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11485@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11486 int (*wallclock_updated)(void);
11487 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11488 void (*cancel_alarm)(u32 flags);
11489-} vmi_timer_ops;
11490+} __no_const vmi_timer_ops;
11491
11492 /* Prototypes */
11493 extern void __init vmi_time_init(void);
11494diff -urNp linux-2.6.32.46/arch/x86/include/asm/vsyscall.h linux-2.6.32.46/arch/x86/include/asm/vsyscall.h
11495--- linux-2.6.32.46/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11496+++ linux-2.6.32.46/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11497@@ -15,9 +15,10 @@ enum vsyscall_num {
11498
11499 #ifdef __KERNEL__
11500 #include <linux/seqlock.h>
11501+#include <linux/getcpu.h>
11502+#include <linux/time.h>
11503
11504 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11505-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11506
11507 /* Definitions for CONFIG_GENERIC_TIME definitions */
11508 #define __section_vsyscall_gtod_data __attribute__ \
11509@@ -31,7 +32,6 @@ enum vsyscall_num {
11510 #define VGETCPU_LSL 2
11511
11512 extern int __vgetcpu_mode;
11513-extern volatile unsigned long __jiffies;
11514
11515 /* kernel space (writeable) */
11516 extern int vgetcpu_mode;
11517@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11518
11519 extern void map_vsyscall(void);
11520
11521+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11522+extern time_t vtime(time_t *t);
11523+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11524 #endif /* __KERNEL__ */
11525
11526 #endif /* _ASM_X86_VSYSCALL_H */
11527diff -urNp linux-2.6.32.46/arch/x86/include/asm/x86_init.h linux-2.6.32.46/arch/x86/include/asm/x86_init.h
11528--- linux-2.6.32.46/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11529+++ linux-2.6.32.46/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11530@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11531 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11532 void (*find_smp_config)(unsigned int reserve);
11533 void (*get_smp_config)(unsigned int early);
11534-};
11535+} __no_const;
11536
11537 /**
11538 * struct x86_init_resources - platform specific resource related ops
11539@@ -42,7 +42,7 @@ struct x86_init_resources {
11540 void (*probe_roms)(void);
11541 void (*reserve_resources)(void);
11542 char *(*memory_setup)(void);
11543-};
11544+} __no_const;
11545
11546 /**
11547 * struct x86_init_irqs - platform specific interrupt setup
11548@@ -55,7 +55,7 @@ struct x86_init_irqs {
11549 void (*pre_vector_init)(void);
11550 void (*intr_init)(void);
11551 void (*trap_init)(void);
11552-};
11553+} __no_const;
11554
11555 /**
11556 * struct x86_init_oem - oem platform specific customizing functions
11557@@ -65,7 +65,7 @@ struct x86_init_irqs {
11558 struct x86_init_oem {
11559 void (*arch_setup)(void);
11560 void (*banner)(void);
11561-};
11562+} __no_const;
11563
11564 /**
11565 * struct x86_init_paging - platform specific paging functions
11566@@ -75,7 +75,7 @@ struct x86_init_oem {
11567 struct x86_init_paging {
11568 void (*pagetable_setup_start)(pgd_t *base);
11569 void (*pagetable_setup_done)(pgd_t *base);
11570-};
11571+} __no_const;
11572
11573 /**
11574 * struct x86_init_timers - platform specific timer setup
11575@@ -88,7 +88,7 @@ struct x86_init_timers {
11576 void (*setup_percpu_clockev)(void);
11577 void (*tsc_pre_init)(void);
11578 void (*timer_init)(void);
11579-};
11580+} __no_const;
11581
11582 /**
11583 * struct x86_init_ops - functions for platform specific setup
11584@@ -101,7 +101,7 @@ struct x86_init_ops {
11585 struct x86_init_oem oem;
11586 struct x86_init_paging paging;
11587 struct x86_init_timers timers;
11588-};
11589+} __no_const;
11590
11591 /**
11592 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11593@@ -109,7 +109,7 @@ struct x86_init_ops {
11594 */
11595 struct x86_cpuinit_ops {
11596 void (*setup_percpu_clockev)(void);
11597-};
11598+} __no_const;
11599
11600 /**
11601 * struct x86_platform_ops - platform specific runtime functions
11602@@ -121,7 +121,7 @@ struct x86_platform_ops {
11603 unsigned long (*calibrate_tsc)(void);
11604 unsigned long (*get_wallclock)(void);
11605 int (*set_wallclock)(unsigned long nowtime);
11606-};
11607+} __no_const;
11608
11609 extern struct x86_init_ops x86_init;
11610 extern struct x86_cpuinit_ops x86_cpuinit;
11611diff -urNp linux-2.6.32.46/arch/x86/include/asm/xsave.h linux-2.6.32.46/arch/x86/include/asm/xsave.h
11612--- linux-2.6.32.46/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11613+++ linux-2.6.32.46/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11614@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11615 static inline int xsave_user(struct xsave_struct __user *buf)
11616 {
11617 int err;
11618+
11619+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11620+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11621+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11622+#endif
11623+
11624 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11625 "2:\n"
11626 ".section .fixup,\"ax\"\n"
11627@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11628 u32 lmask = mask;
11629 u32 hmask = mask >> 32;
11630
11631+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11632+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11633+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11634+#endif
11635+
11636 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11637 "2:\n"
11638 ".section .fixup,\"ax\"\n"
11639diff -urNp linux-2.6.32.46/arch/x86/Kconfig linux-2.6.32.46/arch/x86/Kconfig
11640--- linux-2.6.32.46/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11641+++ linux-2.6.32.46/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11642@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11643
11644 config X86_32_LAZY_GS
11645 def_bool y
11646- depends on X86_32 && !CC_STACKPROTECTOR
11647+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11648
11649 config KTIME_SCALAR
11650 def_bool X86_32
11651@@ -1008,7 +1008,7 @@ choice
11652
11653 config NOHIGHMEM
11654 bool "off"
11655- depends on !X86_NUMAQ
11656+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11657 ---help---
11658 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11659 However, the address space of 32-bit x86 processors is only 4
11660@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11661
11662 config HIGHMEM4G
11663 bool "4GB"
11664- depends on !X86_NUMAQ
11665+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11666 ---help---
11667 Select this if you have a 32-bit processor and between 1 and 4
11668 gigabytes of physical RAM.
11669@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11670 hex
11671 default 0xB0000000 if VMSPLIT_3G_OPT
11672 default 0x80000000 if VMSPLIT_2G
11673- default 0x78000000 if VMSPLIT_2G_OPT
11674+ default 0x70000000 if VMSPLIT_2G_OPT
11675 default 0x40000000 if VMSPLIT_1G
11676 default 0xC0000000
11677 depends on X86_32
11678@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11679
11680 config EFI
11681 bool "EFI runtime service support"
11682- depends on ACPI
11683+ depends on ACPI && !PAX_KERNEXEC
11684 ---help---
11685 This enables the kernel to use EFI runtime services that are
11686 available (such as the EFI variable services).
11687@@ -1460,6 +1460,7 @@ config SECCOMP
11688
11689 config CC_STACKPROTECTOR
11690 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11691+ depends on X86_64 || !PAX_MEMORY_UDEREF
11692 ---help---
11693 This option turns on the -fstack-protector GCC feature. This
11694 feature puts, at the beginning of functions, a canary value on
11695@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11696 config PHYSICAL_START
11697 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11698 default "0x1000000"
11699+ range 0x400000 0x40000000
11700 ---help---
11701 This gives the physical address where the kernel is loaded.
11702
11703@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11704 hex
11705 prompt "Alignment value to which kernel should be aligned" if X86_32
11706 default "0x1000000"
11707+ range 0x400000 0x1000000 if PAX_KERNEXEC
11708 range 0x2000 0x1000000
11709 ---help---
11710 This value puts the alignment restrictions on physical address
11711@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11712 Say N if you want to disable CPU hotplug.
11713
11714 config COMPAT_VDSO
11715- def_bool y
11716+ def_bool n
11717 prompt "Compat VDSO support"
11718 depends on X86_32 || IA32_EMULATION
11719+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11720 ---help---
11721 Map the 32-bit VDSO to the predictable old-style address too.
11722 ---help---
11723diff -urNp linux-2.6.32.46/arch/x86/Kconfig.cpu linux-2.6.32.46/arch/x86/Kconfig.cpu
11724--- linux-2.6.32.46/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11725+++ linux-2.6.32.46/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11726@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11727
11728 config X86_F00F_BUG
11729 def_bool y
11730- depends on M586MMX || M586TSC || M586 || M486 || M386
11731+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11732
11733 config X86_WP_WORKS_OK
11734 def_bool y
11735@@ -360,7 +360,7 @@ config X86_POPAD_OK
11736
11737 config X86_ALIGNMENT_16
11738 def_bool y
11739- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11740+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11741
11742 config X86_INTEL_USERCOPY
11743 def_bool y
11744@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11745 # generates cmov.
11746 config X86_CMOV
11747 def_bool y
11748- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11749+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11750
11751 config X86_MINIMUM_CPU_FAMILY
11752 int
11753diff -urNp linux-2.6.32.46/arch/x86/Kconfig.debug linux-2.6.32.46/arch/x86/Kconfig.debug
11754--- linux-2.6.32.46/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11755+++ linux-2.6.32.46/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11756@@ -99,7 +99,7 @@ config X86_PTDUMP
11757 config DEBUG_RODATA
11758 bool "Write protect kernel read-only data structures"
11759 default y
11760- depends on DEBUG_KERNEL
11761+ depends on DEBUG_KERNEL && BROKEN
11762 ---help---
11763 Mark the kernel read-only data as write-protected in the pagetables,
11764 in order to catch accidental (and incorrect) writes to such const
11765diff -urNp linux-2.6.32.46/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.46/arch/x86/kernel/acpi/realmode/Makefile
11766--- linux-2.6.32.46/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11767+++ linux-2.6.32.46/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11768@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11769 $(call cc-option, -fno-stack-protector) \
11770 $(call cc-option, -mpreferred-stack-boundary=2)
11771 KBUILD_CFLAGS += $(call cc-option, -m32)
11772+ifdef CONSTIFY_PLUGIN
11773+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11774+endif
11775 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11776 GCOV_PROFILE := n
11777
11778diff -urNp linux-2.6.32.46/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.46/arch/x86/kernel/acpi/realmode/wakeup.S
11779--- linux-2.6.32.46/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11780+++ linux-2.6.32.46/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11781@@ -91,6 +91,9 @@ _start:
11782 /* Do any other stuff... */
11783
11784 #ifndef CONFIG_64BIT
11785+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11786+ call verify_cpu
11787+
11788 /* This could also be done in C code... */
11789 movl pmode_cr3, %eax
11790 movl %eax, %cr3
11791@@ -104,7 +107,7 @@ _start:
11792 movl %eax, %ecx
11793 orl %edx, %ecx
11794 jz 1f
11795- movl $0xc0000080, %ecx
11796+ mov $MSR_EFER, %ecx
11797 wrmsr
11798 1:
11799
11800@@ -114,6 +117,7 @@ _start:
11801 movl pmode_cr0, %eax
11802 movl %eax, %cr0
11803 jmp pmode_return
11804+# include "../../verify_cpu.S"
11805 #else
11806 pushw $0
11807 pushw trampoline_segment
11808diff -urNp linux-2.6.32.46/arch/x86/kernel/acpi/sleep.c linux-2.6.32.46/arch/x86/kernel/acpi/sleep.c
11809--- linux-2.6.32.46/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11810+++ linux-2.6.32.46/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11811@@ -11,11 +11,12 @@
11812 #include <linux/cpumask.h>
11813 #include <asm/segment.h>
11814 #include <asm/desc.h>
11815+#include <asm/e820.h>
11816
11817 #include "realmode/wakeup.h"
11818 #include "sleep.h"
11819
11820-unsigned long acpi_wakeup_address;
11821+unsigned long acpi_wakeup_address = 0x2000;
11822 unsigned long acpi_realmode_flags;
11823
11824 /* address in low memory of the wakeup routine. */
11825@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11826 #else /* CONFIG_64BIT */
11827 header->trampoline_segment = setup_trampoline() >> 4;
11828 #ifdef CONFIG_SMP
11829- stack_start.sp = temp_stack + sizeof(temp_stack);
11830+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11831+
11832+ pax_open_kernel();
11833 early_gdt_descr.address =
11834 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11835+ pax_close_kernel();
11836+
11837 initial_gs = per_cpu_offset(smp_processor_id());
11838 #endif
11839 initial_code = (unsigned long)wakeup_long64;
11840@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11841 return;
11842 }
11843
11844- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11845-
11846- if (!acpi_realmode) {
11847- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11848- return;
11849- }
11850-
11851- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11852+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11853+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11854 }
11855
11856
11857diff -urNp linux-2.6.32.46/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.46/arch/x86/kernel/acpi/wakeup_32.S
11858--- linux-2.6.32.46/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11859+++ linux-2.6.32.46/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11860@@ -30,13 +30,11 @@ wakeup_pmode_return:
11861 # and restore the stack ... but you need gdt for this to work
11862 movl saved_context_esp, %esp
11863
11864- movl %cs:saved_magic, %eax
11865- cmpl $0x12345678, %eax
11866+ cmpl $0x12345678, saved_magic
11867 jne bogus_magic
11868
11869 # jump to place where we left off
11870- movl saved_eip, %eax
11871- jmp *%eax
11872+ jmp *(saved_eip)
11873
11874 bogus_magic:
11875 jmp bogus_magic
11876diff -urNp linux-2.6.32.46/arch/x86/kernel/alternative.c linux-2.6.32.46/arch/x86/kernel/alternative.c
11877--- linux-2.6.32.46/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11878+++ linux-2.6.32.46/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11879@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11880
11881 BUG_ON(p->len > MAX_PATCH_LEN);
11882 /* prep the buffer with the original instructions */
11883- memcpy(insnbuf, p->instr, p->len);
11884+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11885 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11886 (unsigned long)p->instr, p->len);
11887
11888@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11889 if (smp_alt_once)
11890 free_init_pages("SMP alternatives",
11891 (unsigned long)__smp_locks,
11892- (unsigned long)__smp_locks_end);
11893+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11894
11895 restart_nmi();
11896 }
11897@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11898 * instructions. And on the local CPU you need to be protected again NMI or MCE
11899 * handlers seeing an inconsistent instruction while you patch.
11900 */
11901-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11902+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11903 size_t len)
11904 {
11905 unsigned long flags;
11906 local_irq_save(flags);
11907- memcpy(addr, opcode, len);
11908+
11909+ pax_open_kernel();
11910+ memcpy(ktla_ktva(addr), opcode, len);
11911 sync_core();
11912+ pax_close_kernel();
11913+
11914 local_irq_restore(flags);
11915 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11916 that causes hangs on some VIA CPUs. */
11917@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11918 */
11919 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11920 {
11921- unsigned long flags;
11922- char *vaddr;
11923+ unsigned char *vaddr = ktla_ktva(addr);
11924 struct page *pages[2];
11925- int i;
11926+ size_t i;
11927
11928 if (!core_kernel_text((unsigned long)addr)) {
11929- pages[0] = vmalloc_to_page(addr);
11930- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11931+ pages[0] = vmalloc_to_page(vaddr);
11932+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11933 } else {
11934- pages[0] = virt_to_page(addr);
11935+ pages[0] = virt_to_page(vaddr);
11936 WARN_ON(!PageReserved(pages[0]));
11937- pages[1] = virt_to_page(addr + PAGE_SIZE);
11938+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11939 }
11940 BUG_ON(!pages[0]);
11941- local_irq_save(flags);
11942- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11943- if (pages[1])
11944- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11945- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11946- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11947- clear_fixmap(FIX_TEXT_POKE0);
11948- if (pages[1])
11949- clear_fixmap(FIX_TEXT_POKE1);
11950- local_flush_tlb();
11951- sync_core();
11952- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11953- that causes hangs on some VIA CPUs. */
11954+ text_poke_early(addr, opcode, len);
11955 for (i = 0; i < len; i++)
11956- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11957- local_irq_restore(flags);
11958+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11959 return addr;
11960 }
11961diff -urNp linux-2.6.32.46/arch/x86/kernel/amd_iommu.c linux-2.6.32.46/arch/x86/kernel/amd_iommu.c
11962--- linux-2.6.32.46/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11963+++ linux-2.6.32.46/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11964@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11965 }
11966 }
11967
11968-static struct dma_map_ops amd_iommu_dma_ops = {
11969+static const struct dma_map_ops amd_iommu_dma_ops = {
11970 .alloc_coherent = alloc_coherent,
11971 .free_coherent = free_coherent,
11972 .map_page = map_page,
11973diff -urNp linux-2.6.32.46/arch/x86/kernel/apic/apic.c linux-2.6.32.46/arch/x86/kernel/apic/apic.c
11974--- linux-2.6.32.46/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11975+++ linux-2.6.32.46/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11976@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11977 /*
11978 * Debug level, exported for io_apic.c
11979 */
11980-unsigned int apic_verbosity;
11981+int apic_verbosity;
11982
11983 int pic_mode;
11984
11985@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11986 apic_write(APIC_ESR, 0);
11987 v1 = apic_read(APIC_ESR);
11988 ack_APIC_irq();
11989- atomic_inc(&irq_err_count);
11990+ atomic_inc_unchecked(&irq_err_count);
11991
11992 /*
11993 * Here is what the APIC error bits mean:
11994@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11995 u16 *bios_cpu_apicid;
11996 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11997
11998+ pax_track_stack();
11999+
12000 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
12001 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
12002
12003diff -urNp linux-2.6.32.46/arch/x86/kernel/apic/io_apic.c linux-2.6.32.46/arch/x86/kernel/apic/io_apic.c
12004--- linux-2.6.32.46/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
12005+++ linux-2.6.32.46/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
12006@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
12007 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
12008 GFP_ATOMIC);
12009 if (!ioapic_entries)
12010- return 0;
12011+ return NULL;
12012
12013 for (apic = 0; apic < nr_ioapics; apic++) {
12014 ioapic_entries[apic] =
12015@@ -733,7 +733,7 @@ nomem:
12016 kfree(ioapic_entries[apic]);
12017 kfree(ioapic_entries);
12018
12019- return 0;
12020+ return NULL;
12021 }
12022
12023 /*
12024@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
12025 }
12026 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
12027
12028-void lock_vector_lock(void)
12029+void lock_vector_lock(void) __acquires(vector_lock)
12030 {
12031 /* Used to the online set of cpus does not change
12032 * during assign_irq_vector.
12033@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
12034 spin_lock(&vector_lock);
12035 }
12036
12037-void unlock_vector_lock(void)
12038+void unlock_vector_lock(void) __releases(vector_lock)
12039 {
12040 spin_unlock(&vector_lock);
12041 }
12042@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12043 ack_APIC_irq();
12044 }
12045
12046-atomic_t irq_mis_count;
12047+atomic_unchecked_t irq_mis_count;
12048
12049 static void ack_apic_level(unsigned int irq)
12050 {
12051@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12052
12053 /* Tail end of version 0x11 I/O APIC bug workaround */
12054 if (!(v & (1 << (i & 0x1f)))) {
12055- atomic_inc(&irq_mis_count);
12056+ atomic_inc_unchecked(&irq_mis_count);
12057 spin_lock(&ioapic_lock);
12058 __mask_and_edge_IO_APIC_irq(cfg);
12059 __unmask_and_level_IO_APIC_irq(cfg);
12060diff -urNp linux-2.6.32.46/arch/x86/kernel/apm_32.c linux-2.6.32.46/arch/x86/kernel/apm_32.c
12061--- linux-2.6.32.46/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
12062+++ linux-2.6.32.46/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
12063@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12064 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12065 * even though they are called in protected mode.
12066 */
12067-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12068+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12069 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12070
12071 static const char driver_version[] = "1.16ac"; /* no spaces */
12072@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12073 BUG_ON(cpu != 0);
12074 gdt = get_cpu_gdt_table(cpu);
12075 save_desc_40 = gdt[0x40 / 8];
12076+
12077+ pax_open_kernel();
12078 gdt[0x40 / 8] = bad_bios_desc;
12079+ pax_close_kernel();
12080
12081 apm_irq_save(flags);
12082 APM_DO_SAVE_SEGS;
12083@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12084 &call->esi);
12085 APM_DO_RESTORE_SEGS;
12086 apm_irq_restore(flags);
12087+
12088+ pax_open_kernel();
12089 gdt[0x40 / 8] = save_desc_40;
12090+ pax_close_kernel();
12091+
12092 put_cpu();
12093
12094 return call->eax & 0xff;
12095@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12096 BUG_ON(cpu != 0);
12097 gdt = get_cpu_gdt_table(cpu);
12098 save_desc_40 = gdt[0x40 / 8];
12099+
12100+ pax_open_kernel();
12101 gdt[0x40 / 8] = bad_bios_desc;
12102+ pax_close_kernel();
12103
12104 apm_irq_save(flags);
12105 APM_DO_SAVE_SEGS;
12106@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12107 &call->eax);
12108 APM_DO_RESTORE_SEGS;
12109 apm_irq_restore(flags);
12110+
12111+ pax_open_kernel();
12112 gdt[0x40 / 8] = save_desc_40;
12113+ pax_close_kernel();
12114+
12115 put_cpu();
12116 return error;
12117 }
12118@@ -975,7 +989,7 @@ recalc:
12119
12120 static void apm_power_off(void)
12121 {
12122- unsigned char po_bios_call[] = {
12123+ const unsigned char po_bios_call[] = {
12124 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12125 0x8e, 0xd0, /* movw ax,ss */
12126 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12127@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12128 * code to that CPU.
12129 */
12130 gdt = get_cpu_gdt_table(0);
12131+
12132+ pax_open_kernel();
12133 set_desc_base(&gdt[APM_CS >> 3],
12134 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12135 set_desc_base(&gdt[APM_CS_16 >> 3],
12136 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12137 set_desc_base(&gdt[APM_DS >> 3],
12138 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12139+ pax_close_kernel();
12140
12141 proc_create("apm", 0, NULL, &apm_file_ops);
12142
12143diff -urNp linux-2.6.32.46/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.46/arch/x86/kernel/asm-offsets_32.c
12144--- linux-2.6.32.46/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
12145+++ linux-2.6.32.46/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
12146@@ -51,7 +51,6 @@ void foo(void)
12147 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12148 BLANK();
12149
12150- OFFSET(TI_task, thread_info, task);
12151 OFFSET(TI_exec_domain, thread_info, exec_domain);
12152 OFFSET(TI_flags, thread_info, flags);
12153 OFFSET(TI_status, thread_info, status);
12154@@ -60,6 +59,8 @@ void foo(void)
12155 OFFSET(TI_restart_block, thread_info, restart_block);
12156 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12157 OFFSET(TI_cpu, thread_info, cpu);
12158+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12159+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12160 BLANK();
12161
12162 OFFSET(GDS_size, desc_ptr, size);
12163@@ -99,6 +100,7 @@ void foo(void)
12164
12165 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12166 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12167+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12168 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12169 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12170 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12171@@ -115,6 +117,11 @@ void foo(void)
12172 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12173 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12174 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12175+
12176+#ifdef CONFIG_PAX_KERNEXEC
12177+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12178+#endif
12179+
12180 #endif
12181
12182 #ifdef CONFIG_XEN
12183diff -urNp linux-2.6.32.46/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.46/arch/x86/kernel/asm-offsets_64.c
12184--- linux-2.6.32.46/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12185+++ linux-2.6.32.46/arch/x86/kernel/asm-offsets_64.c 2011-08-23 20:24:19.000000000 -0400
12186@@ -44,6 +44,8 @@ int main(void)
12187 ENTRY(addr_limit);
12188 ENTRY(preempt_count);
12189 ENTRY(status);
12190+ ENTRY(lowest_stack);
12191+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12192 #ifdef CONFIG_IA32_EMULATION
12193 ENTRY(sysenter_return);
12194 #endif
12195@@ -63,6 +65,18 @@ int main(void)
12196 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12197 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12198 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12199+
12200+#ifdef CONFIG_PAX_KERNEXEC
12201+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12202+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12203+#endif
12204+
12205+#ifdef CONFIG_PAX_MEMORY_UDEREF
12206+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12207+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12208+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
12209+#endif
12210+
12211 #endif
12212
12213
12214@@ -115,6 +129,7 @@ int main(void)
12215 ENTRY(cr8);
12216 BLANK();
12217 #undef ENTRY
12218+ DEFINE(TSS_size, sizeof(struct tss_struct));
12219 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12220 BLANK();
12221 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12222@@ -130,6 +145,7 @@ int main(void)
12223
12224 BLANK();
12225 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12226+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12227 #ifdef CONFIG_XEN
12228 BLANK();
12229 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12230diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/amd.c linux-2.6.32.46/arch/x86/kernel/cpu/amd.c
12231--- linux-2.6.32.46/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12232+++ linux-2.6.32.46/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12233@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12234 unsigned int size)
12235 {
12236 /* AMD errata T13 (order #21922) */
12237- if ((c->x86 == 6)) {
12238+ if (c->x86 == 6) {
12239 /* Duron Rev A0 */
12240 if (c->x86_model == 3 && c->x86_mask == 0)
12241 size = 64;
12242diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/common.c linux-2.6.32.46/arch/x86/kernel/cpu/common.c
12243--- linux-2.6.32.46/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12244+++ linux-2.6.32.46/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12245@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12246
12247 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12248
12249-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12250-#ifdef CONFIG_X86_64
12251- /*
12252- * We need valid kernel segments for data and code in long mode too
12253- * IRET will check the segment types kkeil 2000/10/28
12254- * Also sysret mandates a special GDT layout
12255- *
12256- * TLS descriptors are currently at a different place compared to i386.
12257- * Hopefully nobody expects them at a fixed place (Wine?)
12258- */
12259- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12260- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12261- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12262- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12263- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12264- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12265-#else
12266- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12267- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12268- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12269- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12270- /*
12271- * Segments used for calling PnP BIOS have byte granularity.
12272- * They code segments and data segments have fixed 64k limits,
12273- * the transfer segment sizes are set at run time.
12274- */
12275- /* 32-bit code */
12276- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12277- /* 16-bit code */
12278- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12279- /* 16-bit data */
12280- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12281- /* 16-bit data */
12282- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12283- /* 16-bit data */
12284- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12285- /*
12286- * The APM segments have byte granularity and their bases
12287- * are set at run time. All have 64k limits.
12288- */
12289- /* 32-bit code */
12290- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12291- /* 16-bit code */
12292- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12293- /* data */
12294- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12295-
12296- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12297- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12298- GDT_STACK_CANARY_INIT
12299-#endif
12300-} };
12301-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12302-
12303 static int __init x86_xsave_setup(char *s)
12304 {
12305 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12306@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12307 {
12308 struct desc_ptr gdt_descr;
12309
12310- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12311+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12312 gdt_descr.size = GDT_SIZE - 1;
12313 load_gdt(&gdt_descr);
12314 /* Reload the per-cpu base */
12315@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12316 /* Filter out anything that depends on CPUID levels we don't have */
12317 filter_cpuid_features(c, true);
12318
12319+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12320+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12321+#endif
12322+
12323 /* If the model name is still unset, do table lookup. */
12324 if (!c->x86_model_id[0]) {
12325 const char *p;
12326@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12327 }
12328 __setup("clearcpuid=", setup_disablecpuid);
12329
12330+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12331+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12332+
12333 #ifdef CONFIG_X86_64
12334 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12335
12336@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12337 EXPORT_PER_CPU_SYMBOL(current_task);
12338
12339 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12340- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12341+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12342 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12343
12344 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12345@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12346 {
12347 memset(regs, 0, sizeof(struct pt_regs));
12348 regs->fs = __KERNEL_PERCPU;
12349- regs->gs = __KERNEL_STACK_CANARY;
12350+ savesegment(gs, regs->gs);
12351
12352 return regs;
12353 }
12354@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12355 int i;
12356
12357 cpu = stack_smp_processor_id();
12358- t = &per_cpu(init_tss, cpu);
12359+ t = init_tss + cpu;
12360 orig_ist = &per_cpu(orig_ist, cpu);
12361
12362 #ifdef CONFIG_NUMA
12363@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12364 switch_to_new_gdt(cpu);
12365 loadsegment(fs, 0);
12366
12367- load_idt((const struct desc_ptr *)&idt_descr);
12368+ load_idt(&idt_descr);
12369
12370 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12371 syscall_init();
12372@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12373 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12374 barrier();
12375
12376- check_efer();
12377 if (cpu != 0)
12378 enable_x2apic();
12379
12380@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12381 {
12382 int cpu = smp_processor_id();
12383 struct task_struct *curr = current;
12384- struct tss_struct *t = &per_cpu(init_tss, cpu);
12385+ struct tss_struct *t = init_tss + cpu;
12386 struct thread_struct *thread = &curr->thread;
12387
12388 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12389diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/intel.c linux-2.6.32.46/arch/x86/kernel/cpu/intel.c
12390--- linux-2.6.32.46/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12391+++ linux-2.6.32.46/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12392@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12393 * Update the IDT descriptor and reload the IDT so that
12394 * it uses the read-only mapped virtual address.
12395 */
12396- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12397+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12398 load_idt(&idt_descr);
12399 }
12400 #endif
12401diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.46/arch/x86/kernel/cpu/intel_cacheinfo.c
12402--- linux-2.6.32.46/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12403+++ linux-2.6.32.46/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12404@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12405 return ret;
12406 }
12407
12408-static struct sysfs_ops sysfs_ops = {
12409+static const struct sysfs_ops sysfs_ops = {
12410 .show = show,
12411 .store = store,
12412 };
12413diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/Makefile linux-2.6.32.46/arch/x86/kernel/cpu/Makefile
12414--- linux-2.6.32.46/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12415+++ linux-2.6.32.46/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12416@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12417 CFLAGS_REMOVE_common.o = -pg
12418 endif
12419
12420-# Make sure load_percpu_segment has no stackprotector
12421-nostackp := $(call cc-option, -fno-stack-protector)
12422-CFLAGS_common.o := $(nostackp)
12423-
12424 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12425 obj-y += proc.o capflags.o powerflags.o common.o
12426 obj-y += vmware.o hypervisor.o sched.o
12427diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce_amd.c
12428--- linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12429+++ linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12430@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12431 return ret;
12432 }
12433
12434-static struct sysfs_ops threshold_ops = {
12435+static const struct sysfs_ops threshold_ops = {
12436 .show = show,
12437 .store = store,
12438 };
12439diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce.c
12440--- linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12441+++ linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12442@@ -43,6 +43,7 @@
12443 #include <asm/ipi.h>
12444 #include <asm/mce.h>
12445 #include <asm/msr.h>
12446+#include <asm/local.h>
12447
12448 #include "mce-internal.h"
12449
12450@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12451 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12452 m->cs, m->ip);
12453
12454- if (m->cs == __KERNEL_CS)
12455+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12456 print_symbol("{%s}", m->ip);
12457 pr_cont("\n");
12458 }
12459@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12460
12461 #define PANIC_TIMEOUT 5 /* 5 seconds */
12462
12463-static atomic_t mce_paniced;
12464+static atomic_unchecked_t mce_paniced;
12465
12466 static int fake_panic;
12467-static atomic_t mce_fake_paniced;
12468+static atomic_unchecked_t mce_fake_paniced;
12469
12470 /* Panic in progress. Enable interrupts and wait for final IPI */
12471 static void wait_for_panic(void)
12472@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12473 /*
12474 * Make sure only one CPU runs in machine check panic
12475 */
12476- if (atomic_inc_return(&mce_paniced) > 1)
12477+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12478 wait_for_panic();
12479 barrier();
12480
12481@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12482 console_verbose();
12483 } else {
12484 /* Don't log too much for fake panic */
12485- if (atomic_inc_return(&mce_fake_paniced) > 1)
12486+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12487 return;
12488 }
12489 print_mce_head();
12490@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12491 * might have been modified by someone else.
12492 */
12493 rmb();
12494- if (atomic_read(&mce_paniced))
12495+ if (atomic_read_unchecked(&mce_paniced))
12496 wait_for_panic();
12497 if (!monarch_timeout)
12498 goto out;
12499@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12500 */
12501
12502 static DEFINE_SPINLOCK(mce_state_lock);
12503-static int open_count; /* #times opened */
12504+static local_t open_count; /* #times opened */
12505 static int open_exclu; /* already open exclusive? */
12506
12507 static int mce_open(struct inode *inode, struct file *file)
12508 {
12509 spin_lock(&mce_state_lock);
12510
12511- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12512+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12513 spin_unlock(&mce_state_lock);
12514
12515 return -EBUSY;
12516@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12517
12518 if (file->f_flags & O_EXCL)
12519 open_exclu = 1;
12520- open_count++;
12521+ local_inc(&open_count);
12522
12523 spin_unlock(&mce_state_lock);
12524
12525@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12526 {
12527 spin_lock(&mce_state_lock);
12528
12529- open_count--;
12530+ local_dec(&open_count);
12531 open_exclu = 0;
12532
12533 spin_unlock(&mce_state_lock);
12534@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12535 static void mce_reset(void)
12536 {
12537 cpu_missing = 0;
12538- atomic_set(&mce_fake_paniced, 0);
12539+ atomic_set_unchecked(&mce_fake_paniced, 0);
12540 atomic_set(&mce_executing, 0);
12541 atomic_set(&mce_callin, 0);
12542 atomic_set(&global_nwo, 0);
12543diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce-inject.c
12544--- linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12545+++ linux-2.6.32.46/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12546@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12547 static int inject_init(void)
12548 {
12549 printk(KERN_INFO "Machine check injector initialized\n");
12550- mce_chrdev_ops.write = mce_write;
12551+ pax_open_kernel();
12552+ *(void **)&mce_chrdev_ops.write = mce_write;
12553+ pax_close_kernel();
12554 register_die_notifier(&mce_raise_nb);
12555 return 0;
12556 }
12557diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/amd.c
12558--- linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12559+++ linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12560@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12561 return 0;
12562 }
12563
12564-static struct mtrr_ops amd_mtrr_ops = {
12565+static const struct mtrr_ops amd_mtrr_ops = {
12566 .vendor = X86_VENDOR_AMD,
12567 .set = amd_set_mtrr,
12568 .get = amd_get_mtrr,
12569diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/centaur.c
12570--- linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12571+++ linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12572@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12573 return 0;
12574 }
12575
12576-static struct mtrr_ops centaur_mtrr_ops = {
12577+static const struct mtrr_ops centaur_mtrr_ops = {
12578 .vendor = X86_VENDOR_CENTAUR,
12579 .set = centaur_set_mcr,
12580 .get = centaur_get_mcr,
12581diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/cyrix.c
12582--- linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12583+++ linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12584@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12585 post_set();
12586 }
12587
12588-static struct mtrr_ops cyrix_mtrr_ops = {
12589+static const struct mtrr_ops cyrix_mtrr_ops = {
12590 .vendor = X86_VENDOR_CYRIX,
12591 .set_all = cyrix_set_all,
12592 .set = cyrix_set_arr,
12593diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/generic.c
12594--- linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12595+++ linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12596@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12597 /*
12598 * Generic structure...
12599 */
12600-struct mtrr_ops generic_mtrr_ops = {
12601+const struct mtrr_ops generic_mtrr_ops = {
12602 .use_intel_if = 1,
12603 .set_all = generic_set_all,
12604 .get = generic_get_mtrr,
12605diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/main.c
12606--- linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12607+++ linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12608@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12609 u64 size_or_mask, size_and_mask;
12610 static bool mtrr_aps_delayed_init;
12611
12612-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12613+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12614
12615-struct mtrr_ops *mtrr_if;
12616+const struct mtrr_ops *mtrr_if;
12617
12618 static void set_mtrr(unsigned int reg, unsigned long base,
12619 unsigned long size, mtrr_type type);
12620
12621-void set_mtrr_ops(struct mtrr_ops *ops)
12622+void set_mtrr_ops(const struct mtrr_ops *ops)
12623 {
12624 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12625 mtrr_ops[ops->vendor] = ops;
12626diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/mtrr.h
12627--- linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12628+++ linux-2.6.32.46/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-26 20:23:57.000000000 -0400
12629@@ -25,14 +25,14 @@ struct mtrr_ops {
12630 int (*validate_add_page)(unsigned long base, unsigned long size,
12631 unsigned int type);
12632 int (*have_wrcomb)(void);
12633-};
12634+} __do_const;
12635
12636 extern int generic_get_free_region(unsigned long base, unsigned long size,
12637 int replace_reg);
12638 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12639 unsigned int type);
12640
12641-extern struct mtrr_ops generic_mtrr_ops;
12642+extern const struct mtrr_ops generic_mtrr_ops;
12643
12644 extern int positive_have_wrcomb(void);
12645
12646@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12647 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12648 void get_mtrr_state(void);
12649
12650-extern void set_mtrr_ops(struct mtrr_ops *ops);
12651+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12652
12653 extern u64 size_or_mask, size_and_mask;
12654-extern struct mtrr_ops *mtrr_if;
12655+extern const struct mtrr_ops *mtrr_if;
12656
12657 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12658 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12659diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.46/arch/x86/kernel/cpu/perfctr-watchdog.c
12660--- linux-2.6.32.46/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12661+++ linux-2.6.32.46/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12662@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12663
12664 /* Interface defining a CPU specific perfctr watchdog */
12665 struct wd_ops {
12666- int (*reserve)(void);
12667- void (*unreserve)(void);
12668- int (*setup)(unsigned nmi_hz);
12669- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12670- void (*stop)(void);
12671+ int (* const reserve)(void);
12672+ void (* const unreserve)(void);
12673+ int (* const setup)(unsigned nmi_hz);
12674+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12675+ void (* const stop)(void);
12676 unsigned perfctr;
12677 unsigned evntsel;
12678 u64 checkbit;
12679@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12680 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12681 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12682
12683+/* cannot be const */
12684 static struct wd_ops intel_arch_wd_ops;
12685
12686 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12687@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12688 return 1;
12689 }
12690
12691+/* cannot be const */
12692 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12693 .reserve = single_msr_reserve,
12694 .unreserve = single_msr_unreserve,
12695diff -urNp linux-2.6.32.46/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.46/arch/x86/kernel/cpu/perf_event.c
12696--- linux-2.6.32.46/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12697+++ linux-2.6.32.46/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12698@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12699 * count to the generic event atomically:
12700 */
12701 again:
12702- prev_raw_count = atomic64_read(&hwc->prev_count);
12703+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12704 rdmsrl(hwc->event_base + idx, new_raw_count);
12705
12706- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12707+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12708 new_raw_count) != prev_raw_count)
12709 goto again;
12710
12711@@ -741,7 +741,7 @@ again:
12712 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12713 delta >>= shift;
12714
12715- atomic64_add(delta, &event->count);
12716+ atomic64_add_unchecked(delta, &event->count);
12717 atomic64_sub(delta, &hwc->period_left);
12718
12719 return new_raw_count;
12720@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12721 * The hw event starts counting from this event offset,
12722 * mark it to be able to extra future deltas:
12723 */
12724- atomic64_set(&hwc->prev_count, (u64)-left);
12725+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12726
12727 err = checking_wrmsrl(hwc->event_base + idx,
12728 (u64)(-left) & x86_pmu.event_mask);
12729@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12730 break;
12731
12732 callchain_store(entry, frame.return_address);
12733- fp = frame.next_frame;
12734+ fp = (__force const void __user *)frame.next_frame;
12735 }
12736 }
12737
12738diff -urNp linux-2.6.32.46/arch/x86/kernel/crash.c linux-2.6.32.46/arch/x86/kernel/crash.c
12739--- linux-2.6.32.46/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12740+++ linux-2.6.32.46/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12741@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12742 regs = args->regs;
12743
12744 #ifdef CONFIG_X86_32
12745- if (!user_mode_vm(regs)) {
12746+ if (!user_mode(regs)) {
12747 crash_fixup_ss_esp(&fixed_regs, regs);
12748 regs = &fixed_regs;
12749 }
12750diff -urNp linux-2.6.32.46/arch/x86/kernel/doublefault_32.c linux-2.6.32.46/arch/x86/kernel/doublefault_32.c
12751--- linux-2.6.32.46/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12752+++ linux-2.6.32.46/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12753@@ -11,7 +11,7 @@
12754
12755 #define DOUBLEFAULT_STACKSIZE (1024)
12756 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12757-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12758+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12759
12760 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12761
12762@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12763 unsigned long gdt, tss;
12764
12765 store_gdt(&gdt_desc);
12766- gdt = gdt_desc.address;
12767+ gdt = (unsigned long)gdt_desc.address;
12768
12769 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12770
12771@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12772 /* 0x2 bit is always set */
12773 .flags = X86_EFLAGS_SF | 0x2,
12774 .sp = STACK_START,
12775- .es = __USER_DS,
12776+ .es = __KERNEL_DS,
12777 .cs = __KERNEL_CS,
12778 .ss = __KERNEL_DS,
12779- .ds = __USER_DS,
12780+ .ds = __KERNEL_DS,
12781 .fs = __KERNEL_PERCPU,
12782
12783 .__cr3 = __pa_nodebug(swapper_pg_dir),
12784diff -urNp linux-2.6.32.46/arch/x86/kernel/dumpstack_32.c linux-2.6.32.46/arch/x86/kernel/dumpstack_32.c
12785--- linux-2.6.32.46/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12786+++ linux-2.6.32.46/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12787@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12788 #endif
12789
12790 for (;;) {
12791- struct thread_info *context;
12792+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12793+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12794
12795- context = (struct thread_info *)
12796- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12797- bp = print_context_stack(context, stack, bp, ops,
12798- data, NULL, &graph);
12799-
12800- stack = (unsigned long *)context->previous_esp;
12801- if (!stack)
12802+ if (stack_start == task_stack_page(task))
12803 break;
12804+ stack = *(unsigned long **)stack_start;
12805 if (ops->stack(data, "IRQ") < 0)
12806 break;
12807 touch_nmi_watchdog();
12808@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12809 * When in-kernel, we also print out the stack and code at the
12810 * time of the fault..
12811 */
12812- if (!user_mode_vm(regs)) {
12813+ if (!user_mode(regs)) {
12814 unsigned int code_prologue = code_bytes * 43 / 64;
12815 unsigned int code_len = code_bytes;
12816 unsigned char c;
12817 u8 *ip;
12818+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12819
12820 printk(KERN_EMERG "Stack:\n");
12821 show_stack_log_lvl(NULL, regs, &regs->sp,
12822@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12823
12824 printk(KERN_EMERG "Code: ");
12825
12826- ip = (u8 *)regs->ip - code_prologue;
12827+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12828 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12829 /* try starting at IP */
12830- ip = (u8 *)regs->ip;
12831+ ip = (u8 *)regs->ip + cs_base;
12832 code_len = code_len - code_prologue + 1;
12833 }
12834 for (i = 0; i < code_len; i++, ip++) {
12835@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12836 printk(" Bad EIP value.");
12837 break;
12838 }
12839- if (ip == (u8 *)regs->ip)
12840+ if (ip == (u8 *)regs->ip + cs_base)
12841 printk("<%02x> ", c);
12842 else
12843 printk("%02x ", c);
12844@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12845 {
12846 unsigned short ud2;
12847
12848+ ip = ktla_ktva(ip);
12849 if (ip < PAGE_OFFSET)
12850 return 0;
12851 if (probe_kernel_address((unsigned short *)ip, ud2))
12852diff -urNp linux-2.6.32.46/arch/x86/kernel/dumpstack_64.c linux-2.6.32.46/arch/x86/kernel/dumpstack_64.c
12853--- linux-2.6.32.46/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12854+++ linux-2.6.32.46/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12855@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12856 unsigned long *irq_stack_end =
12857 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12858 unsigned used = 0;
12859- struct thread_info *tinfo;
12860 int graph = 0;
12861+ void *stack_start;
12862
12863 if (!task)
12864 task = current;
12865@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12866 * current stack address. If the stacks consist of nested
12867 * exceptions
12868 */
12869- tinfo = task_thread_info(task);
12870 for (;;) {
12871 char *id;
12872 unsigned long *estack_end;
12873+
12874 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12875 &used, &id);
12876
12877@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12878 if (ops->stack(data, id) < 0)
12879 break;
12880
12881- bp = print_context_stack(tinfo, stack, bp, ops,
12882+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12883 data, estack_end, &graph);
12884 ops->stack(data, "<EOE>");
12885 /*
12886@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12887 if (stack >= irq_stack && stack < irq_stack_end) {
12888 if (ops->stack(data, "IRQ") < 0)
12889 break;
12890- bp = print_context_stack(tinfo, stack, bp,
12891+ bp = print_context_stack(task, irq_stack, stack, bp,
12892 ops, data, irq_stack_end, &graph);
12893 /*
12894 * We link to the next stack (which would be
12895@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12896 /*
12897 * This handles the process stack:
12898 */
12899- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12900+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12901+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12902 put_cpu();
12903 }
12904 EXPORT_SYMBOL(dump_trace);
12905diff -urNp linux-2.6.32.46/arch/x86/kernel/dumpstack.c linux-2.6.32.46/arch/x86/kernel/dumpstack.c
12906--- linux-2.6.32.46/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12907+++ linux-2.6.32.46/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12908@@ -2,6 +2,9 @@
12909 * Copyright (C) 1991, 1992 Linus Torvalds
12910 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12911 */
12912+#ifdef CONFIG_GRKERNSEC_HIDESYM
12913+#define __INCLUDED_BY_HIDESYM 1
12914+#endif
12915 #include <linux/kallsyms.h>
12916 #include <linux/kprobes.h>
12917 #include <linux/uaccess.h>
12918@@ -28,7 +31,7 @@ static int die_counter;
12919
12920 void printk_address(unsigned long address, int reliable)
12921 {
12922- printk(" [<%p>] %s%pS\n", (void *) address,
12923+ printk(" [<%p>] %s%pA\n", (void *) address,
12924 reliable ? "" : "? ", (void *) address);
12925 }
12926
12927@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12928 static void
12929 print_ftrace_graph_addr(unsigned long addr, void *data,
12930 const struct stacktrace_ops *ops,
12931- struct thread_info *tinfo, int *graph)
12932+ struct task_struct *task, int *graph)
12933 {
12934- struct task_struct *task = tinfo->task;
12935 unsigned long ret_addr;
12936 int index = task->curr_ret_stack;
12937
12938@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12939 static inline void
12940 print_ftrace_graph_addr(unsigned long addr, void *data,
12941 const struct stacktrace_ops *ops,
12942- struct thread_info *tinfo, int *graph)
12943+ struct task_struct *task, int *graph)
12944 { }
12945 #endif
12946
12947@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12948 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12949 */
12950
12951-static inline int valid_stack_ptr(struct thread_info *tinfo,
12952- void *p, unsigned int size, void *end)
12953+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12954 {
12955- void *t = tinfo;
12956 if (end) {
12957 if (p < end && p >= (end-THREAD_SIZE))
12958 return 1;
12959@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12960 }
12961
12962 unsigned long
12963-print_context_stack(struct thread_info *tinfo,
12964+print_context_stack(struct task_struct *task, void *stack_start,
12965 unsigned long *stack, unsigned long bp,
12966 const struct stacktrace_ops *ops, void *data,
12967 unsigned long *end, int *graph)
12968 {
12969 struct stack_frame *frame = (struct stack_frame *)bp;
12970
12971- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12972+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12973 unsigned long addr;
12974
12975 addr = *stack;
12976@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12977 } else {
12978 ops->address(data, addr, 0);
12979 }
12980- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12981+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12982 }
12983 stack++;
12984 }
12985@@ -180,7 +180,7 @@ void dump_stack(void)
12986 #endif
12987
12988 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12989- current->pid, current->comm, print_tainted(),
12990+ task_pid_nr(current), current->comm, print_tainted(),
12991 init_utsname()->release,
12992 (int)strcspn(init_utsname()->version, " "),
12993 init_utsname()->version);
12994@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12995 return flags;
12996 }
12997
12998+extern void gr_handle_kernel_exploit(void);
12999+
13000 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13001 {
13002 if (regs && kexec_should_crash(current))
13003@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
13004 panic("Fatal exception in interrupt");
13005 if (panic_on_oops)
13006 panic("Fatal exception");
13007- do_exit(signr);
13008+
13009+ gr_handle_kernel_exploit();
13010+
13011+ do_group_exit(signr);
13012 }
13013
13014 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13015@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
13016 unsigned long flags = oops_begin();
13017 int sig = SIGSEGV;
13018
13019- if (!user_mode_vm(regs))
13020+ if (!user_mode(regs))
13021 report_bug(regs->ip, regs);
13022
13023 if (__die(str, regs, err))
13024diff -urNp linux-2.6.32.46/arch/x86/kernel/dumpstack.h linux-2.6.32.46/arch/x86/kernel/dumpstack.h
13025--- linux-2.6.32.46/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
13026+++ linux-2.6.32.46/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
13027@@ -15,7 +15,7 @@
13028 #endif
13029
13030 extern unsigned long
13031-print_context_stack(struct thread_info *tinfo,
13032+print_context_stack(struct task_struct *task, void *stack_start,
13033 unsigned long *stack, unsigned long bp,
13034 const struct stacktrace_ops *ops, void *data,
13035 unsigned long *end, int *graph);
13036diff -urNp linux-2.6.32.46/arch/x86/kernel/e820.c linux-2.6.32.46/arch/x86/kernel/e820.c
13037--- linux-2.6.32.46/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
13038+++ linux-2.6.32.46/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
13039@@ -733,7 +733,7 @@ struct early_res {
13040 };
13041 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13042 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13043- {}
13044+ { 0, 0, {0}, 0 }
13045 };
13046
13047 static int __init find_overlapped_early(u64 start, u64 end)
13048diff -urNp linux-2.6.32.46/arch/x86/kernel/early_printk.c linux-2.6.32.46/arch/x86/kernel/early_printk.c
13049--- linux-2.6.32.46/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
13050+++ linux-2.6.32.46/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
13051@@ -7,6 +7,7 @@
13052 #include <linux/pci_regs.h>
13053 #include <linux/pci_ids.h>
13054 #include <linux/errno.h>
13055+#include <linux/sched.h>
13056 #include <asm/io.h>
13057 #include <asm/processor.h>
13058 #include <asm/fcntl.h>
13059@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13060 int n;
13061 va_list ap;
13062
13063+ pax_track_stack();
13064+
13065 va_start(ap, fmt);
13066 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13067 early_console->write(early_console, buf, n);
13068diff -urNp linux-2.6.32.46/arch/x86/kernel/efi_32.c linux-2.6.32.46/arch/x86/kernel/efi_32.c
13069--- linux-2.6.32.46/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
13070+++ linux-2.6.32.46/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
13071@@ -38,70 +38,38 @@
13072 */
13073
13074 static unsigned long efi_rt_eflags;
13075-static pgd_t efi_bak_pg_dir_pointer[2];
13076+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13077
13078-void efi_call_phys_prelog(void)
13079+void __init efi_call_phys_prelog(void)
13080 {
13081- unsigned long cr4;
13082- unsigned long temp;
13083 struct desc_ptr gdt_descr;
13084
13085 local_irq_save(efi_rt_eflags);
13086
13087- /*
13088- * If I don't have PAE, I should just duplicate two entries in page
13089- * directory. If I have PAE, I just need to duplicate one entry in
13090- * page directory.
13091- */
13092- cr4 = read_cr4_safe();
13093
13094- if (cr4 & X86_CR4_PAE) {
13095- efi_bak_pg_dir_pointer[0].pgd =
13096- swapper_pg_dir[pgd_index(0)].pgd;
13097- swapper_pg_dir[0].pgd =
13098- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13099- } else {
13100- efi_bak_pg_dir_pointer[0].pgd =
13101- swapper_pg_dir[pgd_index(0)].pgd;
13102- efi_bak_pg_dir_pointer[1].pgd =
13103- swapper_pg_dir[pgd_index(0x400000)].pgd;
13104- swapper_pg_dir[pgd_index(0)].pgd =
13105- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13106- temp = PAGE_OFFSET + 0x400000;
13107- swapper_pg_dir[pgd_index(0x400000)].pgd =
13108- swapper_pg_dir[pgd_index(temp)].pgd;
13109- }
13110+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13111+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13112+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13113
13114 /*
13115 * After the lock is released, the original page table is restored.
13116 */
13117 __flush_tlb_all();
13118
13119- gdt_descr.address = __pa(get_cpu_gdt_table(0));
13120+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
13121 gdt_descr.size = GDT_SIZE - 1;
13122 load_gdt(&gdt_descr);
13123 }
13124
13125-void efi_call_phys_epilog(void)
13126+void __init efi_call_phys_epilog(void)
13127 {
13128- unsigned long cr4;
13129 struct desc_ptr gdt_descr;
13130
13131- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13132+ gdt_descr.address = get_cpu_gdt_table(0);
13133 gdt_descr.size = GDT_SIZE - 1;
13134 load_gdt(&gdt_descr);
13135
13136- cr4 = read_cr4_safe();
13137-
13138- if (cr4 & X86_CR4_PAE) {
13139- swapper_pg_dir[pgd_index(0)].pgd =
13140- efi_bak_pg_dir_pointer[0].pgd;
13141- } else {
13142- swapper_pg_dir[pgd_index(0)].pgd =
13143- efi_bak_pg_dir_pointer[0].pgd;
13144- swapper_pg_dir[pgd_index(0x400000)].pgd =
13145- efi_bak_pg_dir_pointer[1].pgd;
13146- }
13147+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13148
13149 /*
13150 * After the lock is released, the original page table is restored.
13151diff -urNp linux-2.6.32.46/arch/x86/kernel/efi_stub_32.S linux-2.6.32.46/arch/x86/kernel/efi_stub_32.S
13152--- linux-2.6.32.46/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13153+++ linux-2.6.32.46/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13154@@ -6,6 +6,7 @@
13155 */
13156
13157 #include <linux/linkage.h>
13158+#include <linux/init.h>
13159 #include <asm/page_types.h>
13160
13161 /*
13162@@ -20,7 +21,7 @@
13163 * service functions will comply with gcc calling convention, too.
13164 */
13165
13166-.text
13167+__INIT
13168 ENTRY(efi_call_phys)
13169 /*
13170 * 0. The function can only be called in Linux kernel. So CS has been
13171@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13172 * The mapping of lower virtual memory has been created in prelog and
13173 * epilog.
13174 */
13175- movl $1f, %edx
13176- subl $__PAGE_OFFSET, %edx
13177- jmp *%edx
13178+ jmp 1f-__PAGE_OFFSET
13179 1:
13180
13181 /*
13182@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13183 * parameter 2, ..., param n. To make things easy, we save the return
13184 * address of efi_call_phys in a global variable.
13185 */
13186- popl %edx
13187- movl %edx, saved_return_addr
13188- /* get the function pointer into ECX*/
13189- popl %ecx
13190- movl %ecx, efi_rt_function_ptr
13191- movl $2f, %edx
13192- subl $__PAGE_OFFSET, %edx
13193- pushl %edx
13194+ popl (saved_return_addr)
13195+ popl (efi_rt_function_ptr)
13196
13197 /*
13198 * 3. Clear PG bit in %CR0.
13199@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13200 /*
13201 * 5. Call the physical function.
13202 */
13203- jmp *%ecx
13204+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13205
13206-2:
13207 /*
13208 * 6. After EFI runtime service returns, control will return to
13209 * following instruction. We'd better readjust stack pointer first.
13210@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13211 movl %cr0, %edx
13212 orl $0x80000000, %edx
13213 movl %edx, %cr0
13214- jmp 1f
13215-1:
13216+
13217 /*
13218 * 8. Now restore the virtual mode from flat mode by
13219 * adding EIP with PAGE_OFFSET.
13220 */
13221- movl $1f, %edx
13222- jmp *%edx
13223+ jmp 1f+__PAGE_OFFSET
13224 1:
13225
13226 /*
13227 * 9. Balance the stack. And because EAX contain the return value,
13228 * we'd better not clobber it.
13229 */
13230- leal efi_rt_function_ptr, %edx
13231- movl (%edx), %ecx
13232- pushl %ecx
13233+ pushl (efi_rt_function_ptr)
13234
13235 /*
13236- * 10. Push the saved return address onto the stack and return.
13237+ * 10. Return to the saved return address.
13238 */
13239- leal saved_return_addr, %edx
13240- movl (%edx), %ecx
13241- pushl %ecx
13242- ret
13243+ jmpl *(saved_return_addr)
13244 ENDPROC(efi_call_phys)
13245 .previous
13246
13247-.data
13248+__INITDATA
13249 saved_return_addr:
13250 .long 0
13251 efi_rt_function_ptr:
13252diff -urNp linux-2.6.32.46/arch/x86/kernel/entry_32.S linux-2.6.32.46/arch/x86/kernel/entry_32.S
13253--- linux-2.6.32.46/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13254+++ linux-2.6.32.46/arch/x86/kernel/entry_32.S 2011-08-23 20:24:19.000000000 -0400
13255@@ -185,13 +185,146 @@
13256 /*CFI_REL_OFFSET gs, PT_GS*/
13257 .endm
13258 .macro SET_KERNEL_GS reg
13259+
13260+#ifdef CONFIG_CC_STACKPROTECTOR
13261 movl $(__KERNEL_STACK_CANARY), \reg
13262+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13263+ movl $(__USER_DS), \reg
13264+#else
13265+ xorl \reg, \reg
13266+#endif
13267+
13268 movl \reg, %gs
13269 .endm
13270
13271 #endif /* CONFIG_X86_32_LAZY_GS */
13272
13273-.macro SAVE_ALL
13274+.macro pax_enter_kernel
13275+#ifdef CONFIG_PAX_KERNEXEC
13276+ call pax_enter_kernel
13277+#endif
13278+.endm
13279+
13280+.macro pax_exit_kernel
13281+#ifdef CONFIG_PAX_KERNEXEC
13282+ call pax_exit_kernel
13283+#endif
13284+.endm
13285+
13286+#ifdef CONFIG_PAX_KERNEXEC
13287+ENTRY(pax_enter_kernel)
13288+#ifdef CONFIG_PARAVIRT
13289+ pushl %eax
13290+ pushl %ecx
13291+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13292+ mov %eax, %esi
13293+#else
13294+ mov %cr0, %esi
13295+#endif
13296+ bts $16, %esi
13297+ jnc 1f
13298+ mov %cs, %esi
13299+ cmp $__KERNEL_CS, %esi
13300+ jz 3f
13301+ ljmp $__KERNEL_CS, $3f
13302+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13303+2:
13304+#ifdef CONFIG_PARAVIRT
13305+ mov %esi, %eax
13306+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13307+#else
13308+ mov %esi, %cr0
13309+#endif
13310+3:
13311+#ifdef CONFIG_PARAVIRT
13312+ popl %ecx
13313+ popl %eax
13314+#endif
13315+ ret
13316+ENDPROC(pax_enter_kernel)
13317+
13318+ENTRY(pax_exit_kernel)
13319+#ifdef CONFIG_PARAVIRT
13320+ pushl %eax
13321+ pushl %ecx
13322+#endif
13323+ mov %cs, %esi
13324+ cmp $__KERNEXEC_KERNEL_CS, %esi
13325+ jnz 2f
13326+#ifdef CONFIG_PARAVIRT
13327+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13328+ mov %eax, %esi
13329+#else
13330+ mov %cr0, %esi
13331+#endif
13332+ btr $16, %esi
13333+ ljmp $__KERNEL_CS, $1f
13334+1:
13335+#ifdef CONFIG_PARAVIRT
13336+ mov %esi, %eax
13337+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13338+#else
13339+ mov %esi, %cr0
13340+#endif
13341+2:
13342+#ifdef CONFIG_PARAVIRT
13343+ popl %ecx
13344+ popl %eax
13345+#endif
13346+ ret
13347+ENDPROC(pax_exit_kernel)
13348+#endif
13349+
13350+.macro pax_erase_kstack
13351+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13352+ call pax_erase_kstack
13353+#endif
13354+.endm
13355+
13356+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13357+/*
13358+ * ebp: thread_info
13359+ * ecx, edx: can be clobbered
13360+ */
13361+ENTRY(pax_erase_kstack)
13362+ pushl %edi
13363+ pushl %eax
13364+
13365+ mov TI_lowest_stack(%ebp), %edi
13366+ mov $-0xBEEF, %eax
13367+ std
13368+
13369+1: mov %edi, %ecx
13370+ and $THREAD_SIZE_asm - 1, %ecx
13371+ shr $2, %ecx
13372+ repne scasl
13373+ jecxz 2f
13374+
13375+ cmp $2*16, %ecx
13376+ jc 2f
13377+
13378+ mov $2*16, %ecx
13379+ repe scasl
13380+ jecxz 2f
13381+ jne 1b
13382+
13383+2: cld
13384+ mov %esp, %ecx
13385+ sub %edi, %ecx
13386+ shr $2, %ecx
13387+ rep stosl
13388+
13389+ mov TI_task_thread_sp0(%ebp), %edi
13390+ sub $128, %edi
13391+ mov %edi, TI_lowest_stack(%ebp)
13392+
13393+ popl %eax
13394+ popl %edi
13395+ ret
13396+ENDPROC(pax_erase_kstack)
13397+#endif
13398+
13399+.macro __SAVE_ALL _DS
13400 cld
13401 PUSH_GS
13402 pushl %fs
13403@@ -224,7 +357,7 @@
13404 pushl %ebx
13405 CFI_ADJUST_CFA_OFFSET 4
13406 CFI_REL_OFFSET ebx, 0
13407- movl $(__USER_DS), %edx
13408+ movl $\_DS, %edx
13409 movl %edx, %ds
13410 movl %edx, %es
13411 movl $(__KERNEL_PERCPU), %edx
13412@@ -232,6 +365,15 @@
13413 SET_KERNEL_GS %edx
13414 .endm
13415
13416+.macro SAVE_ALL
13417+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13418+ __SAVE_ALL __KERNEL_DS
13419+ pax_enter_kernel
13420+#else
13421+ __SAVE_ALL __USER_DS
13422+#endif
13423+.endm
13424+
13425 .macro RESTORE_INT_REGS
13426 popl %ebx
13427 CFI_ADJUST_CFA_OFFSET -4
13428@@ -352,7 +494,15 @@ check_userspace:
13429 movb PT_CS(%esp), %al
13430 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13431 cmpl $USER_RPL, %eax
13432+
13433+#ifdef CONFIG_PAX_KERNEXEC
13434+ jae resume_userspace
13435+
13436+ PAX_EXIT_KERNEL
13437+ jmp resume_kernel
13438+#else
13439 jb resume_kernel # not returning to v8086 or userspace
13440+#endif
13441
13442 ENTRY(resume_userspace)
13443 LOCKDEP_SYS_EXIT
13444@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13445 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13446 # int/exception return?
13447 jne work_pending
13448- jmp restore_all
13449+ jmp restore_all_pax
13450 END(ret_from_exception)
13451
13452 #ifdef CONFIG_PREEMPT
13453@@ -414,25 +564,36 @@ sysenter_past_esp:
13454 /*CFI_REL_OFFSET cs, 0*/
13455 /*
13456 * Push current_thread_info()->sysenter_return to the stack.
13457- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13458- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13459 */
13460- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13461+ pushl $0
13462 CFI_ADJUST_CFA_OFFSET 4
13463 CFI_REL_OFFSET eip, 0
13464
13465 pushl %eax
13466 CFI_ADJUST_CFA_OFFSET 4
13467 SAVE_ALL
13468+ GET_THREAD_INFO(%ebp)
13469+ movl TI_sysenter_return(%ebp),%ebp
13470+ movl %ebp,PT_EIP(%esp)
13471 ENABLE_INTERRUPTS(CLBR_NONE)
13472
13473 /*
13474 * Load the potential sixth argument from user stack.
13475 * Careful about security.
13476 */
13477+ movl PT_OLDESP(%esp),%ebp
13478+
13479+#ifdef CONFIG_PAX_MEMORY_UDEREF
13480+ mov PT_OLDSS(%esp),%ds
13481+1: movl %ds:(%ebp),%ebp
13482+ push %ss
13483+ pop %ds
13484+#else
13485 cmpl $__PAGE_OFFSET-3,%ebp
13486 jae syscall_fault
13487 1: movl (%ebp),%ebp
13488+#endif
13489+
13490 movl %ebp,PT_EBP(%esp)
13491 .section __ex_table,"a"
13492 .align 4
13493@@ -455,12 +616,23 @@ sysenter_do_call:
13494 testl $_TIF_ALLWORK_MASK, %ecx
13495 jne sysexit_audit
13496 sysenter_exit:
13497+
13498+#ifdef CONFIG_PAX_RANDKSTACK
13499+ pushl_cfi %eax
13500+ call pax_randomize_kstack
13501+ popl_cfi %eax
13502+#endif
13503+
13504+ pax_erase_kstack
13505+
13506 /* if something modifies registers it must also disable sysexit */
13507 movl PT_EIP(%esp), %edx
13508 movl PT_OLDESP(%esp), %ecx
13509 xorl %ebp,%ebp
13510 TRACE_IRQS_ON
13511 1: mov PT_FS(%esp), %fs
13512+2: mov PT_DS(%esp), %ds
13513+3: mov PT_ES(%esp), %es
13514 PTGS_TO_GS
13515 ENABLE_INTERRUPTS_SYSEXIT
13516
13517@@ -477,6 +649,9 @@ sysenter_audit:
13518 movl %eax,%edx /* 2nd arg: syscall number */
13519 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13520 call audit_syscall_entry
13521+
13522+ pax_erase_kstack
13523+
13524 pushl %ebx
13525 CFI_ADJUST_CFA_OFFSET 4
13526 movl PT_EAX(%esp),%eax /* reload syscall number */
13527@@ -504,11 +679,17 @@ sysexit_audit:
13528
13529 CFI_ENDPROC
13530 .pushsection .fixup,"ax"
13531-2: movl $0,PT_FS(%esp)
13532+4: movl $0,PT_FS(%esp)
13533+ jmp 1b
13534+5: movl $0,PT_DS(%esp)
13535+ jmp 1b
13536+6: movl $0,PT_ES(%esp)
13537 jmp 1b
13538 .section __ex_table,"a"
13539 .align 4
13540- .long 1b,2b
13541+ .long 1b,4b
13542+ .long 2b,5b
13543+ .long 3b,6b
13544 .popsection
13545 PTGS_TO_GS_EX
13546 ENDPROC(ia32_sysenter_target)
13547@@ -538,6 +719,14 @@ syscall_exit:
13548 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13549 jne syscall_exit_work
13550
13551+restore_all_pax:
13552+
13553+#ifdef CONFIG_PAX_RANDKSTACK
13554+ call pax_randomize_kstack
13555+#endif
13556+
13557+ pax_erase_kstack
13558+
13559 restore_all:
13560 TRACE_IRQS_IRET
13561 restore_all_notrace:
13562@@ -602,10 +791,29 @@ ldt_ss:
13563 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13564 mov %dx, %ax /* eax: new kernel esp */
13565 sub %eax, %edx /* offset (low word is 0) */
13566- PER_CPU(gdt_page, %ebx)
13567+#ifdef CONFIG_SMP
13568+ movl PER_CPU_VAR(cpu_number), %ebx
13569+ shll $PAGE_SHIFT_asm, %ebx
13570+ addl $cpu_gdt_table, %ebx
13571+#else
13572+ movl $cpu_gdt_table, %ebx
13573+#endif
13574 shr $16, %edx
13575+
13576+#ifdef CONFIG_PAX_KERNEXEC
13577+ mov %cr0, %esi
13578+ btr $16, %esi
13579+ mov %esi, %cr0
13580+#endif
13581+
13582 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13583 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13584+
13585+#ifdef CONFIG_PAX_KERNEXEC
13586+ bts $16, %esi
13587+ mov %esi, %cr0
13588+#endif
13589+
13590 pushl $__ESPFIX_SS
13591 CFI_ADJUST_CFA_OFFSET 4
13592 push %eax /* new kernel esp */
13593@@ -636,31 +844,25 @@ work_resched:
13594 movl TI_flags(%ebp), %ecx
13595 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13596 # than syscall tracing?
13597- jz restore_all
13598+ jz restore_all_pax
13599 testb $_TIF_NEED_RESCHED, %cl
13600 jnz work_resched
13601
13602 work_notifysig: # deal with pending signals and
13603 # notify-resume requests
13604+ movl %esp, %eax
13605 #ifdef CONFIG_VM86
13606 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13607- movl %esp, %eax
13608- jne work_notifysig_v86 # returning to kernel-space or
13609+ jz 1f # returning to kernel-space or
13610 # vm86-space
13611- xorl %edx, %edx
13612- call do_notify_resume
13613- jmp resume_userspace_sig
13614
13615- ALIGN
13616-work_notifysig_v86:
13617 pushl %ecx # save ti_flags for do_notify_resume
13618 CFI_ADJUST_CFA_OFFSET 4
13619 call save_v86_state # %eax contains pt_regs pointer
13620 popl %ecx
13621 CFI_ADJUST_CFA_OFFSET -4
13622 movl %eax, %esp
13623-#else
13624- movl %esp, %eax
13625+1:
13626 #endif
13627 xorl %edx, %edx
13628 call do_notify_resume
13629@@ -673,6 +875,9 @@ syscall_trace_entry:
13630 movl $-ENOSYS,PT_EAX(%esp)
13631 movl %esp, %eax
13632 call syscall_trace_enter
13633+
13634+ pax_erase_kstack
13635+
13636 /* What it returned is what we'll actually use. */
13637 cmpl $(nr_syscalls), %eax
13638 jnae syscall_call
13639@@ -695,6 +900,10 @@ END(syscall_exit_work)
13640
13641 RING0_INT_FRAME # can't unwind into user space anyway
13642 syscall_fault:
13643+#ifdef CONFIG_PAX_MEMORY_UDEREF
13644+ push %ss
13645+ pop %ds
13646+#endif
13647 GET_THREAD_INFO(%ebp)
13648 movl $-EFAULT,PT_EAX(%esp)
13649 jmp resume_userspace
13650@@ -726,6 +935,33 @@ PTREGSCALL(rt_sigreturn)
13651 PTREGSCALL(vm86)
13652 PTREGSCALL(vm86old)
13653
13654+ ALIGN;
13655+ENTRY(kernel_execve)
13656+ push %ebp
13657+ sub $PT_OLDSS+4,%esp
13658+ push %edi
13659+ push %ecx
13660+ push %eax
13661+ lea 3*4(%esp),%edi
13662+ mov $PT_OLDSS/4+1,%ecx
13663+ xorl %eax,%eax
13664+ rep stosl
13665+ pop %eax
13666+ pop %ecx
13667+ pop %edi
13668+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13669+ mov %eax,PT_EBX(%esp)
13670+ mov %edx,PT_ECX(%esp)
13671+ mov %ecx,PT_EDX(%esp)
13672+ mov %esp,%eax
13673+ call sys_execve
13674+ GET_THREAD_INFO(%ebp)
13675+ test %eax,%eax
13676+ jz syscall_exit
13677+ add $PT_OLDSS+4,%esp
13678+ pop %ebp
13679+ ret
13680+
13681 .macro FIXUP_ESPFIX_STACK
13682 /*
13683 * Switch back for ESPFIX stack to the normal zerobased stack
13684@@ -735,7 +971,13 @@ PTREGSCALL(vm86old)
13685 * normal stack and adjusts ESP with the matching offset.
13686 */
13687 /* fixup the stack */
13688- PER_CPU(gdt_page, %ebx)
13689+#ifdef CONFIG_SMP
13690+ movl PER_CPU_VAR(cpu_number), %ebx
13691+ shll $PAGE_SHIFT_asm, %ebx
13692+ addl $cpu_gdt_table, %ebx
13693+#else
13694+ movl $cpu_gdt_table, %ebx
13695+#endif
13696 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13697 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13698 shl $16, %eax
13699@@ -1198,7 +1440,6 @@ return_to_handler:
13700 ret
13701 #endif
13702
13703-.section .rodata,"a"
13704 #include "syscall_table_32.S"
13705
13706 syscall_table_size=(.-sys_call_table)
13707@@ -1255,9 +1496,12 @@ error_code:
13708 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13709 REG_TO_PTGS %ecx
13710 SET_KERNEL_GS %ecx
13711- movl $(__USER_DS), %ecx
13712+ movl $(__KERNEL_DS), %ecx
13713 movl %ecx, %ds
13714 movl %ecx, %es
13715+
13716+ pax_enter_kernel
13717+
13718 TRACE_IRQS_OFF
13719 movl %esp,%eax # pt_regs pointer
13720 call *%edi
13721@@ -1351,6 +1595,9 @@ nmi_stack_correct:
13722 xorl %edx,%edx # zero error code
13723 movl %esp,%eax # pt_regs pointer
13724 call do_nmi
13725+
13726+ pax_exit_kernel
13727+
13728 jmp restore_all_notrace
13729 CFI_ENDPROC
13730
13731@@ -1391,6 +1638,9 @@ nmi_espfix_stack:
13732 FIXUP_ESPFIX_STACK # %eax == %esp
13733 xorl %edx,%edx # zero error code
13734 call do_nmi
13735+
13736+ pax_exit_kernel
13737+
13738 RESTORE_REGS
13739 lss 12+4(%esp), %esp # back to espfix stack
13740 CFI_ADJUST_CFA_OFFSET -24
13741diff -urNp linux-2.6.32.46/arch/x86/kernel/entry_64.S linux-2.6.32.46/arch/x86/kernel/entry_64.S
13742--- linux-2.6.32.46/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13743+++ linux-2.6.32.46/arch/x86/kernel/entry_64.S 2011-08-26 20:19:09.000000000 -0400
13744@@ -53,6 +53,7 @@
13745 #include <asm/paravirt.h>
13746 #include <asm/ftrace.h>
13747 #include <asm/percpu.h>
13748+#include <asm/pgtable.h>
13749
13750 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13751 #include <linux/elf-em.h>
13752@@ -174,6 +175,264 @@ ENTRY(native_usergs_sysret64)
13753 ENDPROC(native_usergs_sysret64)
13754 #endif /* CONFIG_PARAVIRT */
13755
13756+ .macro ljmpq sel, off
13757+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13758+ .byte 0x48; ljmp *1234f(%rip)
13759+ .pushsection .rodata
13760+ .align 16
13761+ 1234: .quad \off; .word \sel
13762+ .popsection
13763+#else
13764+ pushq $\sel
13765+ pushq $\off
13766+ lretq
13767+#endif
13768+ .endm
13769+
13770+ .macro pax_enter_kernel
13771+#ifdef CONFIG_PAX_KERNEXEC
13772+ call pax_enter_kernel
13773+#endif
13774+ .endm
13775+
13776+ .macro pax_exit_kernel
13777+#ifdef CONFIG_PAX_KERNEXEC
13778+ call pax_exit_kernel
13779+#endif
13780+ .endm
13781+
13782+#ifdef CONFIG_PAX_KERNEXEC
13783+ENTRY(pax_enter_kernel)
13784+ pushq %rdi
13785+
13786+#ifdef CONFIG_PARAVIRT
13787+ PV_SAVE_REGS(CLBR_RDI)
13788+#endif
13789+
13790+ GET_CR0_INTO_RDI
13791+ bts $16,%rdi
13792+ jnc 1f
13793+ mov %cs,%edi
13794+ cmp $__KERNEL_CS,%edi
13795+ jz 3f
13796+ ljmpq __KERNEL_CS,3f
13797+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13798+2: SET_RDI_INTO_CR0
13799+3:
13800+
13801+#ifdef CONFIG_PARAVIRT
13802+ PV_RESTORE_REGS(CLBR_RDI)
13803+#endif
13804+
13805+ popq %rdi
13806+ retq
13807+ENDPROC(pax_enter_kernel)
13808+
13809+ENTRY(pax_exit_kernel)
13810+ pushq %rdi
13811+
13812+#ifdef CONFIG_PARAVIRT
13813+ PV_SAVE_REGS(CLBR_RDI)
13814+#endif
13815+
13816+ mov %cs,%rdi
13817+ cmp $__KERNEXEC_KERNEL_CS,%edi
13818+ jnz 2f
13819+ GET_CR0_INTO_RDI
13820+ btr $16,%rdi
13821+ ljmpq __KERNEL_CS,1f
13822+1: SET_RDI_INTO_CR0
13823+2:
13824+
13825+#ifdef CONFIG_PARAVIRT
13826+ PV_RESTORE_REGS(CLBR_RDI);
13827+#endif
13828+
13829+ popq %rdi
13830+ retq
13831+ENDPROC(pax_exit_kernel)
13832+#endif
13833+
13834+ .macro pax_enter_kernel_user
13835+#ifdef CONFIG_PAX_MEMORY_UDEREF
13836+ call pax_enter_kernel_user
13837+#endif
13838+ .endm
13839+
13840+ .macro pax_exit_kernel_user
13841+#ifdef CONFIG_PAX_MEMORY_UDEREF
13842+ call pax_exit_kernel_user
13843+#endif
13844+#ifdef CONFIG_PAX_RANDKSTACK
13845+ push %rax
13846+ call pax_randomize_kstack
13847+ pop %rax
13848+#endif
13849+ .endm
13850+
13851+#ifdef CONFIG_PAX_MEMORY_UDEREF
13852+ENTRY(pax_enter_kernel_user)
13853+ pushq %rdi
13854+ pushq %rbx
13855+
13856+#ifdef CONFIG_PARAVIRT
13857+ PV_SAVE_REGS(CLBR_RDI)
13858+#endif
13859+
13860+ GET_CR3_INTO_RDI
13861+ mov %rdi,%rbx
13862+ add $__START_KERNEL_map,%rbx
13863+ sub phys_base(%rip),%rbx
13864+
13865+#ifdef CONFIG_PARAVIRT
13866+ pushq %rdi
13867+ cmpl $0, pv_info+PARAVIRT_enabled
13868+ jz 1f
13869+ i = 0
13870+ .rept USER_PGD_PTRS
13871+ mov i*8(%rbx),%rsi
13872+ mov $0,%sil
13873+ lea i*8(%rbx),%rdi
13874+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13875+ i = i + 1
13876+ .endr
13877+ jmp 2f
13878+1:
13879+#endif
13880+
13881+ i = 0
13882+ .rept USER_PGD_PTRS
13883+ movb $0,i*8(%rbx)
13884+ i = i + 1
13885+ .endr
13886+
13887+#ifdef CONFIG_PARAVIRT
13888+2: popq %rdi
13889+#endif
13890+ SET_RDI_INTO_CR3
13891+
13892+#ifdef CONFIG_PAX_KERNEXEC
13893+ GET_CR0_INTO_RDI
13894+ bts $16,%rdi
13895+ SET_RDI_INTO_CR0
13896+#endif
13897+
13898+#ifdef CONFIG_PARAVIRT
13899+ PV_RESTORE_REGS(CLBR_RDI)
13900+#endif
13901+
13902+ popq %rbx
13903+ popq %rdi
13904+ retq
13905+ENDPROC(pax_enter_kernel_user)
13906+
13907+ENTRY(pax_exit_kernel_user)
13908+ push %rdi
13909+
13910+#ifdef CONFIG_PARAVIRT
13911+ pushq %rbx
13912+ PV_SAVE_REGS(CLBR_RDI)
13913+#endif
13914+
13915+#ifdef CONFIG_PAX_KERNEXEC
13916+ GET_CR0_INTO_RDI
13917+ btr $16,%rdi
13918+ SET_RDI_INTO_CR0
13919+#endif
13920+
13921+ GET_CR3_INTO_RDI
13922+ add $__START_KERNEL_map,%rdi
13923+ sub phys_base(%rip),%rdi
13924+
13925+#ifdef CONFIG_PARAVIRT
13926+ cmpl $0, pv_info+PARAVIRT_enabled
13927+ jz 1f
13928+ mov %rdi,%rbx
13929+ i = 0
13930+ .rept USER_PGD_PTRS
13931+ mov i*8(%rbx),%rsi
13932+ mov $0x67,%sil
13933+ lea i*8(%rbx),%rdi
13934+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13935+ i = i + 1
13936+ .endr
13937+ jmp 2f
13938+1:
13939+#endif
13940+
13941+ i = 0
13942+ .rept USER_PGD_PTRS
13943+ movb $0x67,i*8(%rdi)
13944+ i = i + 1
13945+ .endr
13946+
13947+#ifdef CONFIG_PARAVIRT
13948+2: PV_RESTORE_REGS(CLBR_RDI)
13949+ popq %rbx
13950+#endif
13951+
13952+ popq %rdi
13953+ retq
13954+ENDPROC(pax_exit_kernel_user)
13955+#endif
13956+
13957+.macro pax_erase_kstack
13958+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13959+ call pax_erase_kstack
13960+#endif
13961+.endm
13962+
13963+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13964+/*
13965+ * r10: thread_info
13966+ * rcx, rdx: can be clobbered
13967+ */
13968+ENTRY(pax_erase_kstack)
13969+ pushq %rdi
13970+ pushq %rax
13971+ pushq %r10
13972+
13973+ GET_THREAD_INFO(%r10)
13974+ mov TI_lowest_stack(%r10), %rdi
13975+ mov $-0xBEEF, %rax
13976+ std
13977+
13978+1: mov %edi, %ecx
13979+ and $THREAD_SIZE_asm - 1, %ecx
13980+ shr $3, %ecx
13981+ repne scasq
13982+ jecxz 2f
13983+
13984+ cmp $2*8, %ecx
13985+ jc 2f
13986+
13987+ mov $2*8, %ecx
13988+ repe scasq
13989+ jecxz 2f
13990+ jne 1b
13991+
13992+2: cld
13993+ mov %esp, %ecx
13994+ sub %edi, %ecx
13995+
13996+ cmp $THREAD_SIZE_asm, %rcx
13997+ jb 3f
13998+ ud2
13999+3:
14000+
14001+ shr $3, %ecx
14002+ rep stosq
14003+
14004+ mov TI_task_thread_sp0(%r10), %rdi
14005+ sub $256, %rdi
14006+ mov %rdi, TI_lowest_stack(%r10)
14007+
14008+ popq %r10
14009+ popq %rax
14010+ popq %rdi
14011+ ret
14012+ENDPROC(pax_erase_kstack)
14013+#endif
14014
14015 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
14016 #ifdef CONFIG_TRACE_IRQFLAGS
14017@@ -317,7 +576,7 @@ ENTRY(save_args)
14018 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
14019 movq_cfi rbp, 8 /* push %rbp */
14020 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
14021- testl $3, CS(%rdi)
14022+ testb $3, CS(%rdi)
14023 je 1f
14024 SWAPGS
14025 /*
14026@@ -409,7 +668,7 @@ ENTRY(ret_from_fork)
14027
14028 RESTORE_REST
14029
14030- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14031+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
14032 je int_ret_from_sys_call
14033
14034 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
14035@@ -455,7 +714,7 @@ END(ret_from_fork)
14036 ENTRY(system_call)
14037 CFI_STARTPROC simple
14038 CFI_SIGNAL_FRAME
14039- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14040+ CFI_DEF_CFA rsp,0
14041 CFI_REGISTER rip,rcx
14042 /*CFI_REGISTER rflags,r11*/
14043 SWAPGS_UNSAFE_STACK
14044@@ -468,12 +727,13 @@ ENTRY(system_call_after_swapgs)
14045
14046 movq %rsp,PER_CPU_VAR(old_rsp)
14047 movq PER_CPU_VAR(kernel_stack),%rsp
14048+ pax_enter_kernel_user
14049 /*
14050 * No need to follow this irqs off/on section - it's straight
14051 * and short:
14052 */
14053 ENABLE_INTERRUPTS(CLBR_NONE)
14054- SAVE_ARGS 8,1
14055+ SAVE_ARGS 8*6,1
14056 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14057 movq %rcx,RIP-ARGOFFSET(%rsp)
14058 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14059@@ -502,6 +762,8 @@ sysret_check:
14060 andl %edi,%edx
14061 jnz sysret_careful
14062 CFI_REMEMBER_STATE
14063+ pax_exit_kernel_user
14064+ pax_erase_kstack
14065 /*
14066 * sysretq will re-enable interrupts:
14067 */
14068@@ -562,6 +824,9 @@ auditsys:
14069 movq %rax,%rsi /* 2nd arg: syscall number */
14070 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14071 call audit_syscall_entry
14072+
14073+ pax_erase_kstack
14074+
14075 LOAD_ARGS 0 /* reload call-clobbered registers */
14076 jmp system_call_fastpath
14077
14078@@ -592,6 +857,9 @@ tracesys:
14079 FIXUP_TOP_OF_STACK %rdi
14080 movq %rsp,%rdi
14081 call syscall_trace_enter
14082+
14083+ pax_erase_kstack
14084+
14085 /*
14086 * Reload arg registers from stack in case ptrace changed them.
14087 * We don't reload %rax because syscall_trace_enter() returned
14088@@ -613,7 +881,7 @@ tracesys:
14089 GLOBAL(int_ret_from_sys_call)
14090 DISABLE_INTERRUPTS(CLBR_NONE)
14091 TRACE_IRQS_OFF
14092- testl $3,CS-ARGOFFSET(%rsp)
14093+ testb $3,CS-ARGOFFSET(%rsp)
14094 je retint_restore_args
14095 movl $_TIF_ALLWORK_MASK,%edi
14096 /* edi: mask to check */
14097@@ -800,6 +1068,16 @@ END(interrupt)
14098 CFI_ADJUST_CFA_OFFSET 10*8
14099 call save_args
14100 PARTIAL_FRAME 0
14101+#ifdef CONFIG_PAX_MEMORY_UDEREF
14102+ testb $3, CS(%rdi)
14103+ jnz 1f
14104+ pax_enter_kernel
14105+ jmp 2f
14106+1: pax_enter_kernel_user
14107+2:
14108+#else
14109+ pax_enter_kernel
14110+#endif
14111 call \func
14112 .endm
14113
14114@@ -822,7 +1100,7 @@ ret_from_intr:
14115 CFI_ADJUST_CFA_OFFSET -8
14116 exit_intr:
14117 GET_THREAD_INFO(%rcx)
14118- testl $3,CS-ARGOFFSET(%rsp)
14119+ testb $3,CS-ARGOFFSET(%rsp)
14120 je retint_kernel
14121
14122 /* Interrupt came from user space */
14123@@ -844,12 +1122,15 @@ retint_swapgs: /* return to user-space
14124 * The iretq could re-enable interrupts:
14125 */
14126 DISABLE_INTERRUPTS(CLBR_ANY)
14127+ pax_exit_kernel_user
14128+ pax_erase_kstack
14129 TRACE_IRQS_IRETQ
14130 SWAPGS
14131 jmp restore_args
14132
14133 retint_restore_args: /* return to kernel space */
14134 DISABLE_INTERRUPTS(CLBR_ANY)
14135+ pax_exit_kernel
14136 /*
14137 * The iretq could re-enable interrupts:
14138 */
14139@@ -1032,6 +1313,16 @@ ENTRY(\sym)
14140 CFI_ADJUST_CFA_OFFSET 15*8
14141 call error_entry
14142 DEFAULT_FRAME 0
14143+#ifdef CONFIG_PAX_MEMORY_UDEREF
14144+ testb $3, CS(%rsp)
14145+ jnz 1f
14146+ pax_enter_kernel
14147+ jmp 2f
14148+1: pax_enter_kernel_user
14149+2:
14150+#else
14151+ pax_enter_kernel
14152+#endif
14153 movq %rsp,%rdi /* pt_regs pointer */
14154 xorl %esi,%esi /* no error code */
14155 call \do_sym
14156@@ -1049,6 +1340,16 @@ ENTRY(\sym)
14157 subq $15*8, %rsp
14158 call save_paranoid
14159 TRACE_IRQS_OFF
14160+#ifdef CONFIG_PAX_MEMORY_UDEREF
14161+ testb $3, CS(%rsp)
14162+ jnz 1f
14163+ pax_enter_kernel
14164+ jmp 2f
14165+1: pax_enter_kernel_user
14166+2:
14167+#else
14168+ pax_enter_kernel
14169+#endif
14170 movq %rsp,%rdi /* pt_regs pointer */
14171 xorl %esi,%esi /* no error code */
14172 call \do_sym
14173@@ -1066,9 +1367,24 @@ ENTRY(\sym)
14174 subq $15*8, %rsp
14175 call save_paranoid
14176 TRACE_IRQS_OFF
14177+#ifdef CONFIG_PAX_MEMORY_UDEREF
14178+ testb $3, CS(%rsp)
14179+ jnz 1f
14180+ pax_enter_kernel
14181+ jmp 2f
14182+1: pax_enter_kernel_user
14183+2:
14184+#else
14185+ pax_enter_kernel
14186+#endif
14187 movq %rsp,%rdi /* pt_regs pointer */
14188 xorl %esi,%esi /* no error code */
14189- PER_CPU(init_tss, %rbp)
14190+#ifdef CONFIG_SMP
14191+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14192+ lea init_tss(%rbp), %rbp
14193+#else
14194+ lea init_tss(%rip), %rbp
14195+#endif
14196 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14197 call \do_sym
14198 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14199@@ -1085,6 +1401,16 @@ ENTRY(\sym)
14200 CFI_ADJUST_CFA_OFFSET 15*8
14201 call error_entry
14202 DEFAULT_FRAME 0
14203+#ifdef CONFIG_PAX_MEMORY_UDEREF
14204+ testb $3, CS(%rsp)
14205+ jnz 1f
14206+ pax_enter_kernel
14207+ jmp 2f
14208+1: pax_enter_kernel_user
14209+2:
14210+#else
14211+ pax_enter_kernel
14212+#endif
14213 movq %rsp,%rdi /* pt_regs pointer */
14214 movq ORIG_RAX(%rsp),%rsi /* get error code */
14215 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14216@@ -1104,6 +1430,16 @@ ENTRY(\sym)
14217 call save_paranoid
14218 DEFAULT_FRAME 0
14219 TRACE_IRQS_OFF
14220+#ifdef CONFIG_PAX_MEMORY_UDEREF
14221+ testb $3, CS(%rsp)
14222+ jnz 1f
14223+ pax_enter_kernel
14224+ jmp 2f
14225+1: pax_enter_kernel_user
14226+2:
14227+#else
14228+ pax_enter_kernel
14229+#endif
14230 movq %rsp,%rdi /* pt_regs pointer */
14231 movq ORIG_RAX(%rsp),%rsi /* get error code */
14232 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14233@@ -1405,14 +1741,27 @@ ENTRY(paranoid_exit)
14234 TRACE_IRQS_OFF
14235 testl %ebx,%ebx /* swapgs needed? */
14236 jnz paranoid_restore
14237- testl $3,CS(%rsp)
14238+ testb $3,CS(%rsp)
14239 jnz paranoid_userspace
14240+#ifdef CONFIG_PAX_MEMORY_UDEREF
14241+ pax_exit_kernel
14242+ TRACE_IRQS_IRETQ 0
14243+ SWAPGS_UNSAFE_STACK
14244+ RESTORE_ALL 8
14245+ jmp irq_return
14246+#endif
14247 paranoid_swapgs:
14248+#ifdef CONFIG_PAX_MEMORY_UDEREF
14249+ pax_exit_kernel_user
14250+#else
14251+ pax_exit_kernel
14252+#endif
14253 TRACE_IRQS_IRETQ 0
14254 SWAPGS_UNSAFE_STACK
14255 RESTORE_ALL 8
14256 jmp irq_return
14257 paranoid_restore:
14258+ pax_exit_kernel
14259 TRACE_IRQS_IRETQ 0
14260 RESTORE_ALL 8
14261 jmp irq_return
14262@@ -1470,7 +1819,7 @@ ENTRY(error_entry)
14263 movq_cfi r14, R14+8
14264 movq_cfi r15, R15+8
14265 xorl %ebx,%ebx
14266- testl $3,CS+8(%rsp)
14267+ testb $3,CS+8(%rsp)
14268 je error_kernelspace
14269 error_swapgs:
14270 SWAPGS
14271@@ -1529,6 +1878,16 @@ ENTRY(nmi)
14272 CFI_ADJUST_CFA_OFFSET 15*8
14273 call save_paranoid
14274 DEFAULT_FRAME 0
14275+#ifdef CONFIG_PAX_MEMORY_UDEREF
14276+ testb $3, CS(%rsp)
14277+ jnz 1f
14278+ pax_enter_kernel
14279+ jmp 2f
14280+1: pax_enter_kernel_user
14281+2:
14282+#else
14283+ pax_enter_kernel
14284+#endif
14285 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14286 movq %rsp,%rdi
14287 movq $-1,%rsi
14288@@ -1539,11 +1898,25 @@ ENTRY(nmi)
14289 DISABLE_INTERRUPTS(CLBR_NONE)
14290 testl %ebx,%ebx /* swapgs needed? */
14291 jnz nmi_restore
14292- testl $3,CS(%rsp)
14293+ testb $3,CS(%rsp)
14294 jnz nmi_userspace
14295+#ifdef CONFIG_PAX_MEMORY_UDEREF
14296+ pax_exit_kernel
14297+ SWAPGS_UNSAFE_STACK
14298+ RESTORE_ALL 8
14299+ jmp irq_return
14300+#endif
14301 nmi_swapgs:
14302+#ifdef CONFIG_PAX_MEMORY_UDEREF
14303+ pax_exit_kernel_user
14304+#else
14305+ pax_exit_kernel
14306+#endif
14307 SWAPGS_UNSAFE_STACK
14308+ RESTORE_ALL 8
14309+ jmp irq_return
14310 nmi_restore:
14311+ pax_exit_kernel
14312 RESTORE_ALL 8
14313 jmp irq_return
14314 nmi_userspace:
14315diff -urNp linux-2.6.32.46/arch/x86/kernel/ftrace.c linux-2.6.32.46/arch/x86/kernel/ftrace.c
14316--- linux-2.6.32.46/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14317+++ linux-2.6.32.46/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14318@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14319 static void *mod_code_newcode; /* holds the text to write to the IP */
14320
14321 static unsigned nmi_wait_count;
14322-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14323+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14324
14325 int ftrace_arch_read_dyn_info(char *buf, int size)
14326 {
14327@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14328
14329 r = snprintf(buf, size, "%u %u",
14330 nmi_wait_count,
14331- atomic_read(&nmi_update_count));
14332+ atomic_read_unchecked(&nmi_update_count));
14333 return r;
14334 }
14335
14336@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14337 {
14338 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14339 smp_rmb();
14340+ pax_open_kernel();
14341 ftrace_mod_code();
14342- atomic_inc(&nmi_update_count);
14343+ pax_close_kernel();
14344+ atomic_inc_unchecked(&nmi_update_count);
14345 }
14346 /* Must have previous changes seen before executions */
14347 smp_mb();
14348@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14349
14350
14351
14352-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14353+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14354
14355 static unsigned char *ftrace_nop_replace(void)
14356 {
14357@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14358 {
14359 unsigned char replaced[MCOUNT_INSN_SIZE];
14360
14361+ ip = ktla_ktva(ip);
14362+
14363 /*
14364 * Note: Due to modules and __init, code can
14365 * disappear and change, we need to protect against faulting
14366@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14367 unsigned char old[MCOUNT_INSN_SIZE], *new;
14368 int ret;
14369
14370- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14371+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14372 new = ftrace_call_replace(ip, (unsigned long)func);
14373 ret = ftrace_modify_code(ip, old, new);
14374
14375@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14376 switch (faulted) {
14377 case 0:
14378 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14379- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14380+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14381 break;
14382 case 1:
14383 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14384- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14385+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14386 break;
14387 case 2:
14388 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14389- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14390+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14391 break;
14392 }
14393
14394@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14395 {
14396 unsigned char code[MCOUNT_INSN_SIZE];
14397
14398+ ip = ktla_ktva(ip);
14399+
14400 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14401 return -EFAULT;
14402
14403diff -urNp linux-2.6.32.46/arch/x86/kernel/head32.c linux-2.6.32.46/arch/x86/kernel/head32.c
14404--- linux-2.6.32.46/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14405+++ linux-2.6.32.46/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14406@@ -16,6 +16,7 @@
14407 #include <asm/apic.h>
14408 #include <asm/io_apic.h>
14409 #include <asm/bios_ebda.h>
14410+#include <asm/boot.h>
14411
14412 static void __init i386_default_early_setup(void)
14413 {
14414@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14415 {
14416 reserve_trampoline_memory();
14417
14418- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14419+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14420
14421 #ifdef CONFIG_BLK_DEV_INITRD
14422 /* Reserve INITRD */
14423diff -urNp linux-2.6.32.46/arch/x86/kernel/head_32.S linux-2.6.32.46/arch/x86/kernel/head_32.S
14424--- linux-2.6.32.46/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14425+++ linux-2.6.32.46/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14426@@ -19,10 +19,17 @@
14427 #include <asm/setup.h>
14428 #include <asm/processor-flags.h>
14429 #include <asm/percpu.h>
14430+#include <asm/msr-index.h>
14431
14432 /* Physical address */
14433 #define pa(X) ((X) - __PAGE_OFFSET)
14434
14435+#ifdef CONFIG_PAX_KERNEXEC
14436+#define ta(X) (X)
14437+#else
14438+#define ta(X) ((X) - __PAGE_OFFSET)
14439+#endif
14440+
14441 /*
14442 * References to members of the new_cpu_data structure.
14443 */
14444@@ -52,11 +59,7 @@
14445 * and small than max_low_pfn, otherwise will waste some page table entries
14446 */
14447
14448-#if PTRS_PER_PMD > 1
14449-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14450-#else
14451-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14452-#endif
14453+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14454
14455 /* Enough space to fit pagetables for the low memory linear map */
14456 MAPPING_BEYOND_END = \
14457@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14458 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14459
14460 /*
14461+ * Real beginning of normal "text" segment
14462+ */
14463+ENTRY(stext)
14464+ENTRY(_stext)
14465+
14466+/*
14467 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14468 * %esi points to the real-mode code as a 32-bit pointer.
14469 * CS and DS must be 4 GB flat segments, but we don't depend on
14470@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14471 * can.
14472 */
14473 __HEAD
14474+
14475+#ifdef CONFIG_PAX_KERNEXEC
14476+ jmp startup_32
14477+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14478+.fill PAGE_SIZE-5,1,0xcc
14479+#endif
14480+
14481 ENTRY(startup_32)
14482+ movl pa(stack_start),%ecx
14483+
14484 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14485 us to not reload segments */
14486 testb $(1<<6), BP_loadflags(%esi)
14487@@ -95,7 +113,60 @@ ENTRY(startup_32)
14488 movl %eax,%es
14489 movl %eax,%fs
14490 movl %eax,%gs
14491+ movl %eax,%ss
14492 2:
14493+ leal -__PAGE_OFFSET(%ecx),%esp
14494+
14495+#ifdef CONFIG_SMP
14496+ movl $pa(cpu_gdt_table),%edi
14497+ movl $__per_cpu_load,%eax
14498+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14499+ rorl $16,%eax
14500+ movb %al,__KERNEL_PERCPU + 4(%edi)
14501+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14502+ movl $__per_cpu_end - 1,%eax
14503+ subl $__per_cpu_start,%eax
14504+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14505+#endif
14506+
14507+#ifdef CONFIG_PAX_MEMORY_UDEREF
14508+ movl $NR_CPUS,%ecx
14509+ movl $pa(cpu_gdt_table),%edi
14510+1:
14511+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14512+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14513+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14514+ addl $PAGE_SIZE_asm,%edi
14515+ loop 1b
14516+#endif
14517+
14518+#ifdef CONFIG_PAX_KERNEXEC
14519+ movl $pa(boot_gdt),%edi
14520+ movl $__LOAD_PHYSICAL_ADDR,%eax
14521+ movw %ax,__BOOT_CS + 2(%edi)
14522+ rorl $16,%eax
14523+ movb %al,__BOOT_CS + 4(%edi)
14524+ movb %ah,__BOOT_CS + 7(%edi)
14525+ rorl $16,%eax
14526+
14527+ ljmp $(__BOOT_CS),$1f
14528+1:
14529+
14530+ movl $NR_CPUS,%ecx
14531+ movl $pa(cpu_gdt_table),%edi
14532+ addl $__PAGE_OFFSET,%eax
14533+1:
14534+ movw %ax,__KERNEL_CS + 2(%edi)
14535+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14536+ rorl $16,%eax
14537+ movb %al,__KERNEL_CS + 4(%edi)
14538+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14539+ movb %ah,__KERNEL_CS + 7(%edi)
14540+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14541+ rorl $16,%eax
14542+ addl $PAGE_SIZE_asm,%edi
14543+ loop 1b
14544+#endif
14545
14546 /*
14547 * Clear BSS first so that there are no surprises...
14548@@ -140,9 +211,7 @@ ENTRY(startup_32)
14549 cmpl $num_subarch_entries, %eax
14550 jae bad_subarch
14551
14552- movl pa(subarch_entries)(,%eax,4), %eax
14553- subl $__PAGE_OFFSET, %eax
14554- jmp *%eax
14555+ jmp *pa(subarch_entries)(,%eax,4)
14556
14557 bad_subarch:
14558 WEAK(lguest_entry)
14559@@ -154,10 +223,10 @@ WEAK(xen_entry)
14560 __INITDATA
14561
14562 subarch_entries:
14563- .long default_entry /* normal x86/PC */
14564- .long lguest_entry /* lguest hypervisor */
14565- .long xen_entry /* Xen hypervisor */
14566- .long default_entry /* Moorestown MID */
14567+ .long ta(default_entry) /* normal x86/PC */
14568+ .long ta(lguest_entry) /* lguest hypervisor */
14569+ .long ta(xen_entry) /* Xen hypervisor */
14570+ .long ta(default_entry) /* Moorestown MID */
14571 num_subarch_entries = (. - subarch_entries) / 4
14572 .previous
14573 #endif /* CONFIG_PARAVIRT */
14574@@ -218,8 +287,11 @@ default_entry:
14575 movl %eax, pa(max_pfn_mapped)
14576
14577 /* Do early initialization of the fixmap area */
14578- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14579- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14580+#ifdef CONFIG_COMPAT_VDSO
14581+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14582+#else
14583+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14584+#endif
14585 #else /* Not PAE */
14586
14587 page_pde_offset = (__PAGE_OFFSET >> 20);
14588@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14589 movl %eax, pa(max_pfn_mapped)
14590
14591 /* Do early initialization of the fixmap area */
14592- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14593- movl %eax,pa(swapper_pg_dir+0xffc)
14594+#ifdef CONFIG_COMPAT_VDSO
14595+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14596+#else
14597+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14598+#endif
14599 #endif
14600 jmp 3f
14601 /*
14602@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14603 movl %eax,%es
14604 movl %eax,%fs
14605 movl %eax,%gs
14606+ movl pa(stack_start),%ecx
14607+ movl %eax,%ss
14608+ leal -__PAGE_OFFSET(%ecx),%esp
14609 #endif /* CONFIG_SMP */
14610 3:
14611
14612@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14613 orl %edx,%eax
14614 movl %eax,%cr4
14615
14616+#ifdef CONFIG_X86_PAE
14617 btl $5, %eax # check if PAE is enabled
14618 jnc 6f
14619
14620@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14621 cpuid
14622 cmpl $0x80000000, %eax
14623 jbe 6f
14624+
14625+ /* Clear bogus XD_DISABLE bits */
14626+ call verify_cpu
14627+
14628 mov $0x80000001, %eax
14629 cpuid
14630 /* Execute Disable bit supported? */
14631@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14632 jnc 6f
14633
14634 /* Setup EFER (Extended Feature Enable Register) */
14635- movl $0xc0000080, %ecx
14636+ movl $MSR_EFER, %ecx
14637 rdmsr
14638
14639 btsl $11, %eax
14640 /* Make changes effective */
14641 wrmsr
14642
14643+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14644+ movl $1,pa(nx_enabled)
14645+#endif
14646+
14647 6:
14648
14649 /*
14650@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14651 movl %eax,%cr0 /* ..and set paging (PG) bit */
14652 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14653 1:
14654- /* Set up the stack pointer */
14655- lss stack_start,%esp
14656+ /* Shift the stack pointer to a virtual address */
14657+ addl $__PAGE_OFFSET, %esp
14658
14659 /*
14660 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14661@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14662
14663 #ifdef CONFIG_SMP
14664 cmpb $0, ready
14665- jz 1f /* Initial CPU cleans BSS */
14666- jmp checkCPUtype
14667-1:
14668+ jnz checkCPUtype
14669 #endif /* CONFIG_SMP */
14670
14671 /*
14672@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14673 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14674 movl %eax,%ss # after changing gdt.
14675
14676- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14677+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14678 movl %eax,%ds
14679 movl %eax,%es
14680
14681@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14682 */
14683 cmpb $0,ready
14684 jne 1f
14685- movl $per_cpu__gdt_page,%eax
14686+ movl $cpu_gdt_table,%eax
14687 movl $per_cpu__stack_canary,%ecx
14688+#ifdef CONFIG_SMP
14689+ addl $__per_cpu_load,%ecx
14690+#endif
14691 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14692 shrl $16, %ecx
14693 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14694 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14695 1:
14696-#endif
14697 movl $(__KERNEL_STACK_CANARY),%eax
14698+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14699+ movl $(__USER_DS),%eax
14700+#else
14701+ xorl %eax,%eax
14702+#endif
14703 movl %eax,%gs
14704
14705 xorl %eax,%eax # Clear LDT
14706@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14707
14708 cld # gcc2 wants the direction flag cleared at all times
14709 pushl $0 # fake return address for unwinder
14710-#ifdef CONFIG_SMP
14711- movb ready, %cl
14712 movb $1, ready
14713- cmpb $0,%cl # the first CPU calls start_kernel
14714- je 1f
14715- movl (stack_start), %esp
14716-1:
14717-#endif /* CONFIG_SMP */
14718 jmp *(initial_code)
14719
14720 /*
14721@@ -546,22 +631,22 @@ early_page_fault:
14722 jmp early_fault
14723
14724 early_fault:
14725- cld
14726 #ifdef CONFIG_PRINTK
14727+ cmpl $1,%ss:early_recursion_flag
14728+ je hlt_loop
14729+ incl %ss:early_recursion_flag
14730+ cld
14731 pusha
14732 movl $(__KERNEL_DS),%eax
14733 movl %eax,%ds
14734 movl %eax,%es
14735- cmpl $2,early_recursion_flag
14736- je hlt_loop
14737- incl early_recursion_flag
14738 movl %cr2,%eax
14739 pushl %eax
14740 pushl %edx /* trapno */
14741 pushl $fault_msg
14742 call printk
14743+; call dump_stack
14744 #endif
14745- call dump_stack
14746 hlt_loop:
14747 hlt
14748 jmp hlt_loop
14749@@ -569,8 +654,11 @@ hlt_loop:
14750 /* This is the default interrupt "handler" :-) */
14751 ALIGN
14752 ignore_int:
14753- cld
14754 #ifdef CONFIG_PRINTK
14755+ cmpl $2,%ss:early_recursion_flag
14756+ je hlt_loop
14757+ incl %ss:early_recursion_flag
14758+ cld
14759 pushl %eax
14760 pushl %ecx
14761 pushl %edx
14762@@ -579,9 +667,6 @@ ignore_int:
14763 movl $(__KERNEL_DS),%eax
14764 movl %eax,%ds
14765 movl %eax,%es
14766- cmpl $2,early_recursion_flag
14767- je hlt_loop
14768- incl early_recursion_flag
14769 pushl 16(%esp)
14770 pushl 24(%esp)
14771 pushl 32(%esp)
14772@@ -600,6 +685,8 @@ ignore_int:
14773 #endif
14774 iret
14775
14776+#include "verify_cpu.S"
14777+
14778 __REFDATA
14779 .align 4
14780 ENTRY(initial_code)
14781@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14782 /*
14783 * BSS section
14784 */
14785-__PAGE_ALIGNED_BSS
14786- .align PAGE_SIZE_asm
14787 #ifdef CONFIG_X86_PAE
14788+.section .swapper_pg_pmd,"a",@progbits
14789 swapper_pg_pmd:
14790 .fill 1024*KPMDS,4,0
14791 #else
14792+.section .swapper_pg_dir,"a",@progbits
14793 ENTRY(swapper_pg_dir)
14794 .fill 1024,4,0
14795 #endif
14796+.section .swapper_pg_fixmap,"a",@progbits
14797 swapper_pg_fixmap:
14798 .fill 1024,4,0
14799 #ifdef CONFIG_X86_TRAMPOLINE
14800+.section .trampoline_pg_dir,"a",@progbits
14801 ENTRY(trampoline_pg_dir)
14802+#ifdef CONFIG_X86_PAE
14803+ .fill 4,8,0
14804+#else
14805 .fill 1024,4,0
14806 #endif
14807+#endif
14808+
14809+.section .empty_zero_page,"a",@progbits
14810 ENTRY(empty_zero_page)
14811 .fill 4096,1,0
14812
14813 /*
14814+ * The IDT has to be page-aligned to simplify the Pentium
14815+ * F0 0F bug workaround.. We have a special link segment
14816+ * for this.
14817+ */
14818+.section .idt,"a",@progbits
14819+ENTRY(idt_table)
14820+ .fill 256,8,0
14821+
14822+/*
14823 * This starts the data section.
14824 */
14825 #ifdef CONFIG_X86_PAE
14826-__PAGE_ALIGNED_DATA
14827- /* Page-aligned for the benefit of paravirt? */
14828- .align PAGE_SIZE_asm
14829+.section .swapper_pg_dir,"a",@progbits
14830+
14831 ENTRY(swapper_pg_dir)
14832 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14833 # if KPMDS == 3
14834@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14835 # error "Kernel PMDs should be 1, 2 or 3"
14836 # endif
14837 .align PAGE_SIZE_asm /* needs to be page-sized too */
14838+
14839+#ifdef CONFIG_PAX_PER_CPU_PGD
14840+ENTRY(cpu_pgd)
14841+ .rept NR_CPUS
14842+ .fill 4,8,0
14843+ .endr
14844+#endif
14845+
14846 #endif
14847
14848 .data
14849+.balign 4
14850 ENTRY(stack_start)
14851- .long init_thread_union+THREAD_SIZE
14852- .long __BOOT_DS
14853+ .long init_thread_union+THREAD_SIZE-8
14854
14855 ready: .byte 0
14856
14857+.section .rodata,"a",@progbits
14858 early_recursion_flag:
14859 .long 0
14860
14861@@ -697,7 +809,7 @@ fault_msg:
14862 .word 0 # 32 bit align gdt_desc.address
14863 boot_gdt_descr:
14864 .word __BOOT_DS+7
14865- .long boot_gdt - __PAGE_OFFSET
14866+ .long pa(boot_gdt)
14867
14868 .word 0 # 32-bit align idt_desc.address
14869 idt_descr:
14870@@ -708,7 +820,7 @@ idt_descr:
14871 .word 0 # 32 bit align gdt_desc.address
14872 ENTRY(early_gdt_descr)
14873 .word GDT_ENTRIES*8-1
14874- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14875+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14876
14877 /*
14878 * The boot_gdt must mirror the equivalent in setup.S and is
14879@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14880 .align L1_CACHE_BYTES
14881 ENTRY(boot_gdt)
14882 .fill GDT_ENTRY_BOOT_CS,8,0
14883- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14884- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14885+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14886+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14887+
14888+ .align PAGE_SIZE_asm
14889+ENTRY(cpu_gdt_table)
14890+ .rept NR_CPUS
14891+ .quad 0x0000000000000000 /* NULL descriptor */
14892+ .quad 0x0000000000000000 /* 0x0b reserved */
14893+ .quad 0x0000000000000000 /* 0x13 reserved */
14894+ .quad 0x0000000000000000 /* 0x1b reserved */
14895+
14896+#ifdef CONFIG_PAX_KERNEXEC
14897+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14898+#else
14899+ .quad 0x0000000000000000 /* 0x20 unused */
14900+#endif
14901+
14902+ .quad 0x0000000000000000 /* 0x28 unused */
14903+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14904+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14905+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14906+ .quad 0x0000000000000000 /* 0x4b reserved */
14907+ .quad 0x0000000000000000 /* 0x53 reserved */
14908+ .quad 0x0000000000000000 /* 0x5b reserved */
14909+
14910+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14911+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14912+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14913+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14914+
14915+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14916+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14917+
14918+ /*
14919+ * Segments used for calling PnP BIOS have byte granularity.
14920+ * The code segments and data segments have fixed 64k limits,
14921+ * the transfer segment sizes are set at run time.
14922+ */
14923+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14924+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14925+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14926+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14927+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14928+
14929+ /*
14930+ * The APM segments have byte granularity and their bases
14931+ * are set at run time. All have 64k limits.
14932+ */
14933+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14934+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14935+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14936+
14937+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14938+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14939+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14940+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14941+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14942+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14943+
14944+ /* Be sure this is zeroed to avoid false validations in Xen */
14945+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14946+ .endr
14947diff -urNp linux-2.6.32.46/arch/x86/kernel/head_64.S linux-2.6.32.46/arch/x86/kernel/head_64.S
14948--- linux-2.6.32.46/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14949+++ linux-2.6.32.46/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14950@@ -19,6 +19,7 @@
14951 #include <asm/cache.h>
14952 #include <asm/processor-flags.h>
14953 #include <asm/percpu.h>
14954+#include <asm/cpufeature.h>
14955
14956 #ifdef CONFIG_PARAVIRT
14957 #include <asm/asm-offsets.h>
14958@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14959 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14960 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14961 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14962+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14963+L3_VMALLOC_START = pud_index(VMALLOC_START)
14964+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14965+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14966
14967 .text
14968 __HEAD
14969@@ -85,35 +90,22 @@ startup_64:
14970 */
14971 addq %rbp, init_level4_pgt + 0(%rip)
14972 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14973+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14974+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14975 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14976
14977 addq %rbp, level3_ident_pgt + 0(%rip)
14978+#ifndef CONFIG_XEN
14979+ addq %rbp, level3_ident_pgt + 8(%rip)
14980+#endif
14981
14982- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14983- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14984+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14985
14986- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14987+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14988+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14989
14990- /* Add an Identity mapping if I am above 1G */
14991- leaq _text(%rip), %rdi
14992- andq $PMD_PAGE_MASK, %rdi
14993-
14994- movq %rdi, %rax
14995- shrq $PUD_SHIFT, %rax
14996- andq $(PTRS_PER_PUD - 1), %rax
14997- jz ident_complete
14998-
14999- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15000- leaq level3_ident_pgt(%rip), %rbx
15001- movq %rdx, 0(%rbx, %rax, 8)
15002-
15003- movq %rdi, %rax
15004- shrq $PMD_SHIFT, %rax
15005- andq $(PTRS_PER_PMD - 1), %rax
15006- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15007- leaq level2_spare_pgt(%rip), %rbx
15008- movq %rdx, 0(%rbx, %rax, 8)
15009-ident_complete:
15010+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15011+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15012
15013 /*
15014 * Fixup the kernel text+data virtual addresses. Note that
15015@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
15016 * after the boot processor executes this code.
15017 */
15018
15019- /* Enable PAE mode and PGE */
15020- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15021+ /* Enable PAE mode and PSE/PGE */
15022+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15023 movq %rax, %cr4
15024
15025 /* Setup early boot stage 4 level pagetables. */
15026@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
15027 movl $MSR_EFER, %ecx
15028 rdmsr
15029 btsl $_EFER_SCE, %eax /* Enable System Call */
15030- btl $20,%edi /* No Execute supported? */
15031+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15032 jnc 1f
15033 btsl $_EFER_NX, %eax
15034+ leaq init_level4_pgt(%rip), %rdi
15035+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15036+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15037+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15038 1: wrmsr /* Make changes effective */
15039
15040 /* Setup cr0 */
15041@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
15042 .quad x86_64_start_kernel
15043 ENTRY(initial_gs)
15044 .quad INIT_PER_CPU_VAR(irq_stack_union)
15045- __FINITDATA
15046
15047 ENTRY(stack_start)
15048 .quad init_thread_union+THREAD_SIZE-8
15049 .word 0
15050+ __FINITDATA
15051
15052 bad_address:
15053 jmp bad_address
15054
15055- .section ".init.text","ax"
15056+ __INIT
15057 #ifdef CONFIG_EARLY_PRINTK
15058 .globl early_idt_handlers
15059 early_idt_handlers:
15060@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15061 #endif /* EARLY_PRINTK */
15062 1: hlt
15063 jmp 1b
15064+ .previous
15065
15066 #ifdef CONFIG_EARLY_PRINTK
15067+ __INITDATA
15068 early_recursion_flag:
15069 .long 0
15070+ .previous
15071
15072+ .section .rodata,"a",@progbits
15073 early_idt_msg:
15074 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15075 early_idt_ripmsg:
15076 .asciz "RIP %s\n"
15077-#endif /* CONFIG_EARLY_PRINTK */
15078 .previous
15079+#endif /* CONFIG_EARLY_PRINTK */
15080
15081+ .section .rodata,"a",@progbits
15082 #define NEXT_PAGE(name) \
15083 .balign PAGE_SIZE; \
15084 ENTRY(name)
15085@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15086 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15087 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15088 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15089+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15090+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15091+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15092+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15093 .org init_level4_pgt + L4_START_KERNEL*8, 0
15094 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15095 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15096
15097+#ifdef CONFIG_PAX_PER_CPU_PGD
15098+NEXT_PAGE(cpu_pgd)
15099+ .rept NR_CPUS
15100+ .fill 512,8,0
15101+ .endr
15102+#endif
15103+
15104 NEXT_PAGE(level3_ident_pgt)
15105 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15106+#ifdef CONFIG_XEN
15107 .fill 511,8,0
15108+#else
15109+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15110+ .fill 510,8,0
15111+#endif
15112+
15113+NEXT_PAGE(level3_vmalloc_pgt)
15114+ .fill 512,8,0
15115+
15116+NEXT_PAGE(level3_vmemmap_pgt)
15117+ .fill L3_VMEMMAP_START,8,0
15118+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15119
15120 NEXT_PAGE(level3_kernel_pgt)
15121 .fill L3_START_KERNEL,8,0
15122@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15123 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15124 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15125
15126+NEXT_PAGE(level2_vmemmap_pgt)
15127+ .fill 512,8,0
15128+
15129 NEXT_PAGE(level2_fixmap_pgt)
15130- .fill 506,8,0
15131- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15132- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15133- .fill 5,8,0
15134+ .fill 507,8,0
15135+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15136+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15137+ .fill 4,8,0
15138
15139-NEXT_PAGE(level1_fixmap_pgt)
15140+NEXT_PAGE(level1_vsyscall_pgt)
15141 .fill 512,8,0
15142
15143-NEXT_PAGE(level2_ident_pgt)
15144- /* Since I easily can, map the first 1G.
15145+ /* Since I easily can, map the first 2G.
15146 * Don't set NX because code runs from these pages.
15147 */
15148- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15149+NEXT_PAGE(level2_ident_pgt)
15150+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15151
15152 NEXT_PAGE(level2_kernel_pgt)
15153 /*
15154@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15155 * If you want to increase this then increase MODULES_VADDR
15156 * too.)
15157 */
15158- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15159- KERNEL_IMAGE_SIZE/PMD_SIZE)
15160-
15161-NEXT_PAGE(level2_spare_pgt)
15162- .fill 512, 8, 0
15163+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15164
15165 #undef PMDS
15166 #undef NEXT_PAGE
15167
15168- .data
15169+ .align PAGE_SIZE
15170+ENTRY(cpu_gdt_table)
15171+ .rept NR_CPUS
15172+ .quad 0x0000000000000000 /* NULL descriptor */
15173+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15174+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15175+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15176+ .quad 0x00cffb000000ffff /* __USER32_CS */
15177+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15178+ .quad 0x00affb000000ffff /* __USER_CS */
15179+
15180+#ifdef CONFIG_PAX_KERNEXEC
15181+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15182+#else
15183+ .quad 0x0 /* unused */
15184+#endif
15185+
15186+ .quad 0,0 /* TSS */
15187+ .quad 0,0 /* LDT */
15188+ .quad 0,0,0 /* three TLS descriptors */
15189+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15190+ /* asm/segment.h:GDT_ENTRIES must match this */
15191+
15192+ /* zero the remaining page */
15193+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15194+ .endr
15195+
15196 .align 16
15197 .globl early_gdt_descr
15198 early_gdt_descr:
15199 .word GDT_ENTRIES*8-1
15200 early_gdt_descr_base:
15201- .quad INIT_PER_CPU_VAR(gdt_page)
15202+ .quad cpu_gdt_table
15203
15204 ENTRY(phys_base)
15205 /* This must match the first entry in level2_kernel_pgt */
15206 .quad 0x0000000000000000
15207
15208 #include "../../x86/xen/xen-head.S"
15209-
15210- .section .bss, "aw", @nobits
15211+
15212+ .section .rodata,"a",@progbits
15213 .align L1_CACHE_BYTES
15214 ENTRY(idt_table)
15215- .skip IDT_ENTRIES * 16
15216+ .fill 512,8,0
15217
15218 __PAGE_ALIGNED_BSS
15219 .align PAGE_SIZE
15220diff -urNp linux-2.6.32.46/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.46/arch/x86/kernel/i386_ksyms_32.c
15221--- linux-2.6.32.46/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15222+++ linux-2.6.32.46/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15223@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15224 EXPORT_SYMBOL(cmpxchg8b_emu);
15225 #endif
15226
15227+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15228+
15229 /* Networking helper routines. */
15230 EXPORT_SYMBOL(csum_partial_copy_generic);
15231+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15232+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15233
15234 EXPORT_SYMBOL(__get_user_1);
15235 EXPORT_SYMBOL(__get_user_2);
15236@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15237
15238 EXPORT_SYMBOL(csum_partial);
15239 EXPORT_SYMBOL(empty_zero_page);
15240+
15241+#ifdef CONFIG_PAX_KERNEXEC
15242+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15243+#endif
15244diff -urNp linux-2.6.32.46/arch/x86/kernel/i8259.c linux-2.6.32.46/arch/x86/kernel/i8259.c
15245--- linux-2.6.32.46/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15246+++ linux-2.6.32.46/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15247@@ -208,7 +208,7 @@ spurious_8259A_irq:
15248 "spurious 8259A interrupt: IRQ%d.\n", irq);
15249 spurious_irq_mask |= irqmask;
15250 }
15251- atomic_inc(&irq_err_count);
15252+ atomic_inc_unchecked(&irq_err_count);
15253 /*
15254 * Theoretically we do not have to handle this IRQ,
15255 * but in Linux this does not cause problems and is
15256diff -urNp linux-2.6.32.46/arch/x86/kernel/init_task.c linux-2.6.32.46/arch/x86/kernel/init_task.c
15257--- linux-2.6.32.46/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15258+++ linux-2.6.32.46/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15259@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15260 * way process stacks are handled. This is done by having a special
15261 * "init_task" linker map entry..
15262 */
15263-union thread_union init_thread_union __init_task_data =
15264- { INIT_THREAD_INFO(init_task) };
15265+union thread_union init_thread_union __init_task_data;
15266
15267 /*
15268 * Initial task structure.
15269@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15270 * section. Since TSS's are completely CPU-local, we want them
15271 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15272 */
15273-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15274-
15275+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15276+EXPORT_SYMBOL(init_tss);
15277diff -urNp linux-2.6.32.46/arch/x86/kernel/ioport.c linux-2.6.32.46/arch/x86/kernel/ioport.c
15278--- linux-2.6.32.46/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15279+++ linux-2.6.32.46/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15280@@ -6,6 +6,7 @@
15281 #include <linux/sched.h>
15282 #include <linux/kernel.h>
15283 #include <linux/capability.h>
15284+#include <linux/security.h>
15285 #include <linux/errno.h>
15286 #include <linux/types.h>
15287 #include <linux/ioport.h>
15288@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15289
15290 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15291 return -EINVAL;
15292+#ifdef CONFIG_GRKERNSEC_IO
15293+ if (turn_on && grsec_disable_privio) {
15294+ gr_handle_ioperm();
15295+ return -EPERM;
15296+ }
15297+#endif
15298 if (turn_on && !capable(CAP_SYS_RAWIO))
15299 return -EPERM;
15300
15301@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15302 * because the ->io_bitmap_max value must match the bitmap
15303 * contents:
15304 */
15305- tss = &per_cpu(init_tss, get_cpu());
15306+ tss = init_tss + get_cpu();
15307
15308 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15309
15310@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15311 return -EINVAL;
15312 /* Trying to gain more privileges? */
15313 if (level > old) {
15314+#ifdef CONFIG_GRKERNSEC_IO
15315+ if (grsec_disable_privio) {
15316+ gr_handle_iopl();
15317+ return -EPERM;
15318+ }
15319+#endif
15320 if (!capable(CAP_SYS_RAWIO))
15321 return -EPERM;
15322 }
15323diff -urNp linux-2.6.32.46/arch/x86/kernel/irq_32.c linux-2.6.32.46/arch/x86/kernel/irq_32.c
15324--- linux-2.6.32.46/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15325+++ linux-2.6.32.46/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15326@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15327 __asm__ __volatile__("andl %%esp,%0" :
15328 "=r" (sp) : "0" (THREAD_SIZE - 1));
15329
15330- return sp < (sizeof(struct thread_info) + STACK_WARN);
15331+ return sp < STACK_WARN;
15332 }
15333
15334 static void print_stack_overflow(void)
15335@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15336 * per-CPU IRQ handling contexts (thread information and stack)
15337 */
15338 union irq_ctx {
15339- struct thread_info tinfo;
15340- u32 stack[THREAD_SIZE/sizeof(u32)];
15341-} __attribute__((aligned(PAGE_SIZE)));
15342+ unsigned long previous_esp;
15343+ u32 stack[THREAD_SIZE/sizeof(u32)];
15344+} __attribute__((aligned(THREAD_SIZE)));
15345
15346 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15347 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15348@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15349 static inline int
15350 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15351 {
15352- union irq_ctx *curctx, *irqctx;
15353+ union irq_ctx *irqctx;
15354 u32 *isp, arg1, arg2;
15355
15356- curctx = (union irq_ctx *) current_thread_info();
15357 irqctx = __get_cpu_var(hardirq_ctx);
15358
15359 /*
15360@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15361 * handler) we can't do that and just have to keep using the
15362 * current stack (which is the irq stack already after all)
15363 */
15364- if (unlikely(curctx == irqctx))
15365+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15366 return 0;
15367
15368 /* build the stack frame on the IRQ stack */
15369- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15370- irqctx->tinfo.task = curctx->tinfo.task;
15371- irqctx->tinfo.previous_esp = current_stack_pointer;
15372+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15373+ irqctx->previous_esp = current_stack_pointer;
15374
15375- /*
15376- * Copy the softirq bits in preempt_count so that the
15377- * softirq checks work in the hardirq context.
15378- */
15379- irqctx->tinfo.preempt_count =
15380- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15381- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15382+#ifdef CONFIG_PAX_MEMORY_UDEREF
15383+ __set_fs(MAKE_MM_SEG(0));
15384+#endif
15385
15386 if (unlikely(overflow))
15387 call_on_stack(print_stack_overflow, isp);
15388@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15389 : "0" (irq), "1" (desc), "2" (isp),
15390 "D" (desc->handle_irq)
15391 : "memory", "cc", "ecx");
15392+
15393+#ifdef CONFIG_PAX_MEMORY_UDEREF
15394+ __set_fs(current_thread_info()->addr_limit);
15395+#endif
15396+
15397 return 1;
15398 }
15399
15400@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15401 */
15402 void __cpuinit irq_ctx_init(int cpu)
15403 {
15404- union irq_ctx *irqctx;
15405-
15406 if (per_cpu(hardirq_ctx, cpu))
15407 return;
15408
15409- irqctx = &per_cpu(hardirq_stack, cpu);
15410- irqctx->tinfo.task = NULL;
15411- irqctx->tinfo.exec_domain = NULL;
15412- irqctx->tinfo.cpu = cpu;
15413- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15414- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15415-
15416- per_cpu(hardirq_ctx, cpu) = irqctx;
15417-
15418- irqctx = &per_cpu(softirq_stack, cpu);
15419- irqctx->tinfo.task = NULL;
15420- irqctx->tinfo.exec_domain = NULL;
15421- irqctx->tinfo.cpu = cpu;
15422- irqctx->tinfo.preempt_count = 0;
15423- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15424-
15425- per_cpu(softirq_ctx, cpu) = irqctx;
15426+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15427+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15428
15429 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15430 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15431@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15432 asmlinkage void do_softirq(void)
15433 {
15434 unsigned long flags;
15435- struct thread_info *curctx;
15436 union irq_ctx *irqctx;
15437 u32 *isp;
15438
15439@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15440 local_irq_save(flags);
15441
15442 if (local_softirq_pending()) {
15443- curctx = current_thread_info();
15444 irqctx = __get_cpu_var(softirq_ctx);
15445- irqctx->tinfo.task = curctx->task;
15446- irqctx->tinfo.previous_esp = current_stack_pointer;
15447+ irqctx->previous_esp = current_stack_pointer;
15448
15449 /* build the stack frame on the softirq stack */
15450- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15451+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15452+
15453+#ifdef CONFIG_PAX_MEMORY_UDEREF
15454+ __set_fs(MAKE_MM_SEG(0));
15455+#endif
15456
15457 call_on_stack(__do_softirq, isp);
15458+
15459+#ifdef CONFIG_PAX_MEMORY_UDEREF
15460+ __set_fs(current_thread_info()->addr_limit);
15461+#endif
15462+
15463 /*
15464 * Shouldnt happen, we returned above if in_interrupt():
15465 */
15466diff -urNp linux-2.6.32.46/arch/x86/kernel/irq.c linux-2.6.32.46/arch/x86/kernel/irq.c
15467--- linux-2.6.32.46/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15468+++ linux-2.6.32.46/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15469@@ -15,7 +15,7 @@
15470 #include <asm/mce.h>
15471 #include <asm/hw_irq.h>
15472
15473-atomic_t irq_err_count;
15474+atomic_unchecked_t irq_err_count;
15475
15476 /* Function pointer for generic interrupt vector handling */
15477 void (*generic_interrupt_extension)(void) = NULL;
15478@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15479 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15480 seq_printf(p, " Machine check polls\n");
15481 #endif
15482- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15483+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15484 #if defined(CONFIG_X86_IO_APIC)
15485- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15486+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15487 #endif
15488 return 0;
15489 }
15490@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15491
15492 u64 arch_irq_stat(void)
15493 {
15494- u64 sum = atomic_read(&irq_err_count);
15495+ u64 sum = atomic_read_unchecked(&irq_err_count);
15496
15497 #ifdef CONFIG_X86_IO_APIC
15498- sum += atomic_read(&irq_mis_count);
15499+ sum += atomic_read_unchecked(&irq_mis_count);
15500 #endif
15501 return sum;
15502 }
15503diff -urNp linux-2.6.32.46/arch/x86/kernel/kgdb.c linux-2.6.32.46/arch/x86/kernel/kgdb.c
15504--- linux-2.6.32.46/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15505+++ linux-2.6.32.46/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15506@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15507
15508 /* clear the trace bit */
15509 linux_regs->flags &= ~X86_EFLAGS_TF;
15510- atomic_set(&kgdb_cpu_doing_single_step, -1);
15511+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15512
15513 /* set the trace bit if we're stepping */
15514 if (remcomInBuffer[0] == 's') {
15515 linux_regs->flags |= X86_EFLAGS_TF;
15516 kgdb_single_step = 1;
15517- atomic_set(&kgdb_cpu_doing_single_step,
15518+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15519 raw_smp_processor_id());
15520 }
15521
15522@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15523 break;
15524
15525 case DIE_DEBUG:
15526- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15527+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15528 raw_smp_processor_id()) {
15529 if (user_mode(regs))
15530 return single_step_cont(regs, args);
15531@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15532 return instruction_pointer(regs);
15533 }
15534
15535-struct kgdb_arch arch_kgdb_ops = {
15536+const struct kgdb_arch arch_kgdb_ops = {
15537 /* Breakpoint instruction: */
15538 .gdb_bpt_instr = { 0xcc },
15539 .flags = KGDB_HW_BREAKPOINT,
15540diff -urNp linux-2.6.32.46/arch/x86/kernel/kprobes.c linux-2.6.32.46/arch/x86/kernel/kprobes.c
15541--- linux-2.6.32.46/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15542+++ linux-2.6.32.46/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15543@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15544 char op;
15545 s32 raddr;
15546 } __attribute__((packed)) * jop;
15547- jop = (struct __arch_jmp_op *)from;
15548+
15549+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15550+
15551+ pax_open_kernel();
15552 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15553 jop->op = RELATIVEJUMP_INSTRUCTION;
15554+ pax_close_kernel();
15555 }
15556
15557 /*
15558@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15559 kprobe_opcode_t opcode;
15560 kprobe_opcode_t *orig_opcodes = opcodes;
15561
15562- if (search_exception_tables((unsigned long)opcodes))
15563+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15564 return 0; /* Page fault may occur on this address. */
15565
15566 retry:
15567@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15568 disp = (u8 *) p->addr + *((s32 *) insn) -
15569 (u8 *) p->ainsn.insn;
15570 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15571+ pax_open_kernel();
15572 *(s32 *)insn = (s32) disp;
15573+ pax_close_kernel();
15574 }
15575 }
15576 #endif
15577@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15578
15579 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15580 {
15581- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15582+ pax_open_kernel();
15583+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15584+ pax_close_kernel();
15585
15586 fix_riprel(p);
15587
15588- if (can_boost(p->addr))
15589+ if (can_boost(ktla_ktva(p->addr)))
15590 p->ainsn.boostable = 0;
15591 else
15592 p->ainsn.boostable = -1;
15593
15594- p->opcode = *p->addr;
15595+ p->opcode = *(ktla_ktva(p->addr));
15596 }
15597
15598 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15599@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15600 if (p->opcode == BREAKPOINT_INSTRUCTION)
15601 regs->ip = (unsigned long)p->addr;
15602 else
15603- regs->ip = (unsigned long)p->ainsn.insn;
15604+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15605 }
15606
15607 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15608@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15609 if (p->ainsn.boostable == 1 && !p->post_handler) {
15610 /* Boost up -- we can execute copied instructions directly */
15611 reset_current_kprobe();
15612- regs->ip = (unsigned long)p->ainsn.insn;
15613+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15614 preempt_enable_no_resched();
15615 return;
15616 }
15617@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15618 struct kprobe_ctlblk *kcb;
15619
15620 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15621- if (*addr != BREAKPOINT_INSTRUCTION) {
15622+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15623 /*
15624 * The breakpoint instruction was removed right
15625 * after we hit it. Another cpu has removed
15626@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15627 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15628 {
15629 unsigned long *tos = stack_addr(regs);
15630- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15631+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15632 unsigned long orig_ip = (unsigned long)p->addr;
15633 kprobe_opcode_t *insn = p->ainsn.insn;
15634
15635@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15636 struct die_args *args = data;
15637 int ret = NOTIFY_DONE;
15638
15639- if (args->regs && user_mode_vm(args->regs))
15640+ if (args->regs && user_mode(args->regs))
15641 return ret;
15642
15643 switch (val) {
15644diff -urNp linux-2.6.32.46/arch/x86/kernel/kvm.c linux-2.6.32.46/arch/x86/kernel/kvm.c
15645--- linux-2.6.32.46/arch/x86/kernel/kvm.c 2011-03-27 14:31:47.000000000 -0400
15646+++ linux-2.6.32.46/arch/x86/kernel/kvm.c 2011-08-24 18:35:52.000000000 -0400
15647@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(vo
15648 pv_mmu_ops.set_pud = kvm_set_pud;
15649 #if PAGETABLE_LEVELS == 4
15650 pv_mmu_ops.set_pgd = kvm_set_pgd;
15651+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15652 #endif
15653 #endif
15654 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15655diff -urNp linux-2.6.32.46/arch/x86/kernel/ldt.c linux-2.6.32.46/arch/x86/kernel/ldt.c
15656--- linux-2.6.32.46/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15657+++ linux-2.6.32.46/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15658@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15659 if (reload) {
15660 #ifdef CONFIG_SMP
15661 preempt_disable();
15662- load_LDT(pc);
15663+ load_LDT_nolock(pc);
15664 if (!cpumask_equal(mm_cpumask(current->mm),
15665 cpumask_of(smp_processor_id())))
15666 smp_call_function(flush_ldt, current->mm, 1);
15667 preempt_enable();
15668 #else
15669- load_LDT(pc);
15670+ load_LDT_nolock(pc);
15671 #endif
15672 }
15673 if (oldsize) {
15674@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15675 return err;
15676
15677 for (i = 0; i < old->size; i++)
15678- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15679+ write_ldt_entry(new->ldt, i, old->ldt + i);
15680 return 0;
15681 }
15682
15683@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15684 retval = copy_ldt(&mm->context, &old_mm->context);
15685 mutex_unlock(&old_mm->context.lock);
15686 }
15687+
15688+ if (tsk == current) {
15689+ mm->context.vdso = 0;
15690+
15691+#ifdef CONFIG_X86_32
15692+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15693+ mm->context.user_cs_base = 0UL;
15694+ mm->context.user_cs_limit = ~0UL;
15695+
15696+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15697+ cpus_clear(mm->context.cpu_user_cs_mask);
15698+#endif
15699+
15700+#endif
15701+#endif
15702+
15703+ }
15704+
15705 return retval;
15706 }
15707
15708@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15709 }
15710 }
15711
15712+#ifdef CONFIG_PAX_SEGMEXEC
15713+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15714+ error = -EINVAL;
15715+ goto out_unlock;
15716+ }
15717+#endif
15718+
15719 fill_ldt(&ldt, &ldt_info);
15720 if (oldmode)
15721 ldt.avl = 0;
15722diff -urNp linux-2.6.32.46/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.46/arch/x86/kernel/machine_kexec_32.c
15723--- linux-2.6.32.46/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15724+++ linux-2.6.32.46/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15725@@ -26,7 +26,7 @@
15726 #include <asm/system.h>
15727 #include <asm/cacheflush.h>
15728
15729-static void set_idt(void *newidt, __u16 limit)
15730+static void set_idt(struct desc_struct *newidt, __u16 limit)
15731 {
15732 struct desc_ptr curidt;
15733
15734@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15735 }
15736
15737
15738-static void set_gdt(void *newgdt, __u16 limit)
15739+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15740 {
15741 struct desc_ptr curgdt;
15742
15743@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15744 }
15745
15746 control_page = page_address(image->control_code_page);
15747- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15748+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15749
15750 relocate_kernel_ptr = control_page;
15751 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15752diff -urNp linux-2.6.32.46/arch/x86/kernel/microcode_amd.c linux-2.6.32.46/arch/x86/kernel/microcode_amd.c
15753--- linux-2.6.32.46/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15754+++ linux-2.6.32.46/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15755@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15756 uci->mc = NULL;
15757 }
15758
15759-static struct microcode_ops microcode_amd_ops = {
15760+static const struct microcode_ops microcode_amd_ops = {
15761 .request_microcode_user = request_microcode_user,
15762 .request_microcode_fw = request_microcode_fw,
15763 .collect_cpu_info = collect_cpu_info_amd,
15764@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15765 .microcode_fini_cpu = microcode_fini_cpu_amd,
15766 };
15767
15768-struct microcode_ops * __init init_amd_microcode(void)
15769+const struct microcode_ops * __init init_amd_microcode(void)
15770 {
15771 return &microcode_amd_ops;
15772 }
15773diff -urNp linux-2.6.32.46/arch/x86/kernel/microcode_core.c linux-2.6.32.46/arch/x86/kernel/microcode_core.c
15774--- linux-2.6.32.46/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15775+++ linux-2.6.32.46/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15776@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15777
15778 #define MICROCODE_VERSION "2.00"
15779
15780-static struct microcode_ops *microcode_ops;
15781+static const struct microcode_ops *microcode_ops;
15782
15783 /*
15784 * Synchronization.
15785diff -urNp linux-2.6.32.46/arch/x86/kernel/microcode_intel.c linux-2.6.32.46/arch/x86/kernel/microcode_intel.c
15786--- linux-2.6.32.46/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15787+++ linux-2.6.32.46/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15788@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15789
15790 static int get_ucode_user(void *to, const void *from, size_t n)
15791 {
15792- return copy_from_user(to, from, n);
15793+ return copy_from_user(to, (__force const void __user *)from, n);
15794 }
15795
15796 static enum ucode_state
15797 request_microcode_user(int cpu, const void __user *buf, size_t size)
15798 {
15799- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15800+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15801 }
15802
15803 static void microcode_fini_cpu(int cpu)
15804@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15805 uci->mc = NULL;
15806 }
15807
15808-static struct microcode_ops microcode_intel_ops = {
15809+static const struct microcode_ops microcode_intel_ops = {
15810 .request_microcode_user = request_microcode_user,
15811 .request_microcode_fw = request_microcode_fw,
15812 .collect_cpu_info = collect_cpu_info,
15813@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15814 .microcode_fini_cpu = microcode_fini_cpu,
15815 };
15816
15817-struct microcode_ops * __init init_intel_microcode(void)
15818+const struct microcode_ops * __init init_intel_microcode(void)
15819 {
15820 return &microcode_intel_ops;
15821 }
15822diff -urNp linux-2.6.32.46/arch/x86/kernel/module.c linux-2.6.32.46/arch/x86/kernel/module.c
15823--- linux-2.6.32.46/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15824+++ linux-2.6.32.46/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15825@@ -34,7 +34,7 @@
15826 #define DEBUGP(fmt...)
15827 #endif
15828
15829-void *module_alloc(unsigned long size)
15830+static void *__module_alloc(unsigned long size, pgprot_t prot)
15831 {
15832 struct vm_struct *area;
15833
15834@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15835 if (!area)
15836 return NULL;
15837
15838- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15839- PAGE_KERNEL_EXEC);
15840+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15841+}
15842+
15843+void *module_alloc(unsigned long size)
15844+{
15845+
15846+#ifdef CONFIG_PAX_KERNEXEC
15847+ return __module_alloc(size, PAGE_KERNEL);
15848+#else
15849+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15850+#endif
15851+
15852 }
15853
15854 /* Free memory returned from module_alloc */
15855@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15856 vfree(module_region);
15857 }
15858
15859+#ifdef CONFIG_PAX_KERNEXEC
15860+#ifdef CONFIG_X86_32
15861+void *module_alloc_exec(unsigned long size)
15862+{
15863+ struct vm_struct *area;
15864+
15865+ if (size == 0)
15866+ return NULL;
15867+
15868+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15869+ return area ? area->addr : NULL;
15870+}
15871+EXPORT_SYMBOL(module_alloc_exec);
15872+
15873+void module_free_exec(struct module *mod, void *module_region)
15874+{
15875+ vunmap(module_region);
15876+}
15877+EXPORT_SYMBOL(module_free_exec);
15878+#else
15879+void module_free_exec(struct module *mod, void *module_region)
15880+{
15881+ module_free(mod, module_region);
15882+}
15883+EXPORT_SYMBOL(module_free_exec);
15884+
15885+void *module_alloc_exec(unsigned long size)
15886+{
15887+ return __module_alloc(size, PAGE_KERNEL_RX);
15888+}
15889+EXPORT_SYMBOL(module_alloc_exec);
15890+#endif
15891+#endif
15892+
15893 /* We don't need anything special. */
15894 int module_frob_arch_sections(Elf_Ehdr *hdr,
15895 Elf_Shdr *sechdrs,
15896@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15897 unsigned int i;
15898 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15899 Elf32_Sym *sym;
15900- uint32_t *location;
15901+ uint32_t *plocation, location;
15902
15903 DEBUGP("Applying relocate section %u to %u\n", relsec,
15904 sechdrs[relsec].sh_info);
15905 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15906 /* This is where to make the change */
15907- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15908- + rel[i].r_offset;
15909+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15910+ location = (uint32_t)plocation;
15911+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15912+ plocation = ktla_ktva((void *)plocation);
15913 /* This is the symbol it is referring to. Note that all
15914 undefined symbols have been resolved. */
15915 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15916@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15917 switch (ELF32_R_TYPE(rel[i].r_info)) {
15918 case R_386_32:
15919 /* We add the value into the location given */
15920- *location += sym->st_value;
15921+ pax_open_kernel();
15922+ *plocation += sym->st_value;
15923+ pax_close_kernel();
15924 break;
15925 case R_386_PC32:
15926 /* Add the value, subtract its postition */
15927- *location += sym->st_value - (uint32_t)location;
15928+ pax_open_kernel();
15929+ *plocation += sym->st_value - location;
15930+ pax_close_kernel();
15931 break;
15932 default:
15933 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15934@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15935 case R_X86_64_NONE:
15936 break;
15937 case R_X86_64_64:
15938+ pax_open_kernel();
15939 *(u64 *)loc = val;
15940+ pax_close_kernel();
15941 break;
15942 case R_X86_64_32:
15943+ pax_open_kernel();
15944 *(u32 *)loc = val;
15945+ pax_close_kernel();
15946 if (val != *(u32 *)loc)
15947 goto overflow;
15948 break;
15949 case R_X86_64_32S:
15950+ pax_open_kernel();
15951 *(s32 *)loc = val;
15952+ pax_close_kernel();
15953 if ((s64)val != *(s32 *)loc)
15954 goto overflow;
15955 break;
15956 case R_X86_64_PC32:
15957 val -= (u64)loc;
15958+ pax_open_kernel();
15959 *(u32 *)loc = val;
15960+ pax_close_kernel();
15961+
15962 #if 0
15963 if ((s64)val != *(s32 *)loc)
15964 goto overflow;
15965diff -urNp linux-2.6.32.46/arch/x86/kernel/paravirt.c linux-2.6.32.46/arch/x86/kernel/paravirt.c
15966--- linux-2.6.32.46/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15967+++ linux-2.6.32.46/arch/x86/kernel/paravirt.c 2011-08-23 20:24:19.000000000 -0400
15968@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15969 {
15970 return x;
15971 }
15972+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15973+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15974+#endif
15975
15976 void __init default_banner(void)
15977 {
15978@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15979 * corresponding structure. */
15980 static void *get_call_destination(u8 type)
15981 {
15982- struct paravirt_patch_template tmpl = {
15983+ const struct paravirt_patch_template tmpl = {
15984 .pv_init_ops = pv_init_ops,
15985 .pv_time_ops = pv_time_ops,
15986 .pv_cpu_ops = pv_cpu_ops,
15987@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15988 .pv_lock_ops = pv_lock_ops,
15989 #endif
15990 };
15991+
15992+ pax_track_stack();
15993 return *((void **)&tmpl + type);
15994 }
15995
15996@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15997 if (opfunc == NULL)
15998 /* If there's no function, patch it with a ud2a (BUG) */
15999 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16000- else if (opfunc == _paravirt_nop)
16001+ else if (opfunc == (void *)_paravirt_nop)
16002 /* If the operation is a nop, then nop the callsite */
16003 ret = paravirt_patch_nop();
16004
16005 /* identity functions just return their single argument */
16006- else if (opfunc == _paravirt_ident_32)
16007+ else if (opfunc == (void *)_paravirt_ident_32)
16008 ret = paravirt_patch_ident_32(insnbuf, len);
16009- else if (opfunc == _paravirt_ident_64)
16010+ else if (opfunc == (void *)_paravirt_ident_64)
16011+ ret = paravirt_patch_ident_64(insnbuf, len);
16012+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16013+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16014 ret = paravirt_patch_ident_64(insnbuf, len);
16015+#endif
16016
16017 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16018 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16019@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
16020 if (insn_len > len || start == NULL)
16021 insn_len = len;
16022 else
16023- memcpy(insnbuf, start, insn_len);
16024+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16025
16026 return insn_len;
16027 }
16028@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
16029 preempt_enable();
16030 }
16031
16032-struct pv_info pv_info = {
16033+struct pv_info pv_info __read_only = {
16034 .name = "bare hardware",
16035 .paravirt_enabled = 0,
16036 .kernel_rpl = 0,
16037 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
16038 };
16039
16040-struct pv_init_ops pv_init_ops = {
16041+struct pv_init_ops pv_init_ops __read_only = {
16042 .patch = native_patch,
16043 };
16044
16045-struct pv_time_ops pv_time_ops = {
16046+struct pv_time_ops pv_time_ops __read_only = {
16047 .sched_clock = native_sched_clock,
16048 };
16049
16050-struct pv_irq_ops pv_irq_ops = {
16051+struct pv_irq_ops pv_irq_ops __read_only = {
16052 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16053 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16054 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16055@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
16056 #endif
16057 };
16058
16059-struct pv_cpu_ops pv_cpu_ops = {
16060+struct pv_cpu_ops pv_cpu_ops __read_only = {
16061 .cpuid = native_cpuid,
16062 .get_debugreg = native_get_debugreg,
16063 .set_debugreg = native_set_debugreg,
16064@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16065 .end_context_switch = paravirt_nop,
16066 };
16067
16068-struct pv_apic_ops pv_apic_ops = {
16069+struct pv_apic_ops pv_apic_ops __read_only = {
16070 #ifdef CONFIG_X86_LOCAL_APIC
16071 .startup_ipi_hook = paravirt_nop,
16072 #endif
16073 };
16074
16075-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16076+#ifdef CONFIG_X86_32
16077+#ifdef CONFIG_X86_PAE
16078+/* 64-bit pagetable entries */
16079+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16080+#else
16081 /* 32-bit pagetable entries */
16082 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16083+#endif
16084 #else
16085 /* 64-bit pagetable entries */
16086 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16087 #endif
16088
16089-struct pv_mmu_ops pv_mmu_ops = {
16090+struct pv_mmu_ops pv_mmu_ops __read_only = {
16091
16092 .read_cr2 = native_read_cr2,
16093 .write_cr2 = native_write_cr2,
16094@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16095 .make_pud = PTE_IDENT,
16096
16097 .set_pgd = native_set_pgd,
16098+ .set_pgd_batched = native_set_pgd_batched,
16099 #endif
16100 #endif /* PAGETABLE_LEVELS >= 3 */
16101
16102@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16103 },
16104
16105 .set_fixmap = native_set_fixmap,
16106+
16107+#ifdef CONFIG_PAX_KERNEXEC
16108+ .pax_open_kernel = native_pax_open_kernel,
16109+ .pax_close_kernel = native_pax_close_kernel,
16110+#endif
16111+
16112 };
16113
16114 EXPORT_SYMBOL_GPL(pv_time_ops);
16115diff -urNp linux-2.6.32.46/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.46/arch/x86/kernel/paravirt-spinlocks.c
16116--- linux-2.6.32.46/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
16117+++ linux-2.6.32.46/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
16118@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16119 __raw_spin_lock(lock);
16120 }
16121
16122-struct pv_lock_ops pv_lock_ops = {
16123+struct pv_lock_ops pv_lock_ops __read_only = {
16124 #ifdef CONFIG_SMP
16125 .spin_is_locked = __ticket_spin_is_locked,
16126 .spin_is_contended = __ticket_spin_is_contended,
16127diff -urNp linux-2.6.32.46/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.46/arch/x86/kernel/pci-calgary_64.c
16128--- linux-2.6.32.46/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
16129+++ linux-2.6.32.46/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
16130@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16131 free_pages((unsigned long)vaddr, get_order(size));
16132 }
16133
16134-static struct dma_map_ops calgary_dma_ops = {
16135+static const struct dma_map_ops calgary_dma_ops = {
16136 .alloc_coherent = calgary_alloc_coherent,
16137 .free_coherent = calgary_free_coherent,
16138 .map_sg = calgary_map_sg,
16139diff -urNp linux-2.6.32.46/arch/x86/kernel/pci-dma.c linux-2.6.32.46/arch/x86/kernel/pci-dma.c
16140--- linux-2.6.32.46/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
16141+++ linux-2.6.32.46/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
16142@@ -14,7 +14,7 @@
16143
16144 static int forbid_dac __read_mostly;
16145
16146-struct dma_map_ops *dma_ops;
16147+const struct dma_map_ops *dma_ops;
16148 EXPORT_SYMBOL(dma_ops);
16149
16150 static int iommu_sac_force __read_mostly;
16151@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16152
16153 int dma_supported(struct device *dev, u64 mask)
16154 {
16155- struct dma_map_ops *ops = get_dma_ops(dev);
16156+ const struct dma_map_ops *ops = get_dma_ops(dev);
16157
16158 #ifdef CONFIG_PCI
16159 if (mask > 0xffffffff && forbid_dac > 0) {
16160diff -urNp linux-2.6.32.46/arch/x86/kernel/pci-gart_64.c linux-2.6.32.46/arch/x86/kernel/pci-gart_64.c
16161--- linux-2.6.32.46/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
16162+++ linux-2.6.32.46/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
16163@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16164 return -1;
16165 }
16166
16167-static struct dma_map_ops gart_dma_ops = {
16168+static const struct dma_map_ops gart_dma_ops = {
16169 .map_sg = gart_map_sg,
16170 .unmap_sg = gart_unmap_sg,
16171 .map_page = gart_map_page,
16172diff -urNp linux-2.6.32.46/arch/x86/kernel/pci-nommu.c linux-2.6.32.46/arch/x86/kernel/pci-nommu.c
16173--- linux-2.6.32.46/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
16174+++ linux-2.6.32.46/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
16175@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16176 flush_write_buffers();
16177 }
16178
16179-struct dma_map_ops nommu_dma_ops = {
16180+const struct dma_map_ops nommu_dma_ops = {
16181 .alloc_coherent = dma_generic_alloc_coherent,
16182 .free_coherent = nommu_free_coherent,
16183 .map_sg = nommu_map_sg,
16184diff -urNp linux-2.6.32.46/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.46/arch/x86/kernel/pci-swiotlb.c
16185--- linux-2.6.32.46/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16186+++ linux-2.6.32.46/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16187@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16188 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16189 }
16190
16191-static struct dma_map_ops swiotlb_dma_ops = {
16192+static const struct dma_map_ops swiotlb_dma_ops = {
16193 .mapping_error = swiotlb_dma_mapping_error,
16194 .alloc_coherent = x86_swiotlb_alloc_coherent,
16195 .free_coherent = swiotlb_free_coherent,
16196diff -urNp linux-2.6.32.46/arch/x86/kernel/process_32.c linux-2.6.32.46/arch/x86/kernel/process_32.c
16197--- linux-2.6.32.46/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16198+++ linux-2.6.32.46/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16199@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16200 unsigned long thread_saved_pc(struct task_struct *tsk)
16201 {
16202 return ((unsigned long *)tsk->thread.sp)[3];
16203+//XXX return tsk->thread.eip;
16204 }
16205
16206 #ifndef CONFIG_SMP
16207@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16208 unsigned short ss, gs;
16209 const char *board;
16210
16211- if (user_mode_vm(regs)) {
16212+ if (user_mode(regs)) {
16213 sp = regs->sp;
16214 ss = regs->ss & 0xffff;
16215- gs = get_user_gs(regs);
16216 } else {
16217 sp = (unsigned long) (&regs->sp);
16218 savesegment(ss, ss);
16219- savesegment(gs, gs);
16220 }
16221+ gs = get_user_gs(regs);
16222
16223 printk("\n");
16224
16225@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16226 regs.bx = (unsigned long) fn;
16227 regs.dx = (unsigned long) arg;
16228
16229- regs.ds = __USER_DS;
16230- regs.es = __USER_DS;
16231+ regs.ds = __KERNEL_DS;
16232+ regs.es = __KERNEL_DS;
16233 regs.fs = __KERNEL_PERCPU;
16234- regs.gs = __KERNEL_STACK_CANARY;
16235+ savesegment(gs, regs.gs);
16236 regs.orig_ax = -1;
16237 regs.ip = (unsigned long) kernel_thread_helper;
16238 regs.cs = __KERNEL_CS | get_kernel_rpl();
16239@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16240 struct task_struct *tsk;
16241 int err;
16242
16243- childregs = task_pt_regs(p);
16244+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16245 *childregs = *regs;
16246 childregs->ax = 0;
16247 childregs->sp = sp;
16248
16249 p->thread.sp = (unsigned long) childregs;
16250 p->thread.sp0 = (unsigned long) (childregs+1);
16251+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16252
16253 p->thread.ip = (unsigned long) ret_from_fork;
16254
16255@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16256 struct thread_struct *prev = &prev_p->thread,
16257 *next = &next_p->thread;
16258 int cpu = smp_processor_id();
16259- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16260+ struct tss_struct *tss = init_tss + cpu;
16261 bool preload_fpu;
16262
16263 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16264@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16265 */
16266 lazy_save_gs(prev->gs);
16267
16268+#ifdef CONFIG_PAX_MEMORY_UDEREF
16269+ __set_fs(task_thread_info(next_p)->addr_limit);
16270+#endif
16271+
16272 /*
16273 * Load the per-thread Thread-Local Storage descriptor.
16274 */
16275@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16276 */
16277 arch_end_context_switch(next_p);
16278
16279+ percpu_write(current_task, next_p);
16280+ percpu_write(current_tinfo, &next_p->tinfo);
16281+
16282 if (preload_fpu)
16283 __math_state_restore();
16284
16285@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16286 if (prev->gs | next->gs)
16287 lazy_load_gs(next->gs);
16288
16289- percpu_write(current_task, next_p);
16290-
16291 return prev_p;
16292 }
16293
16294@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16295 } while (count++ < 16);
16296 return 0;
16297 }
16298-
16299diff -urNp linux-2.6.32.46/arch/x86/kernel/process_64.c linux-2.6.32.46/arch/x86/kernel/process_64.c
16300--- linux-2.6.32.46/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16301+++ linux-2.6.32.46/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16302@@ -91,7 +91,7 @@ static void __exit_idle(void)
16303 void exit_idle(void)
16304 {
16305 /* idle loop has pid 0 */
16306- if (current->pid)
16307+ if (task_pid_nr(current))
16308 return;
16309 __exit_idle();
16310 }
16311@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16312 if (!board)
16313 board = "";
16314 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16315- current->pid, current->comm, print_tainted(),
16316+ task_pid_nr(current), current->comm, print_tainted(),
16317 init_utsname()->release,
16318 (int)strcspn(init_utsname()->version, " "),
16319 init_utsname()->version, board);
16320@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16321 struct pt_regs *childregs;
16322 struct task_struct *me = current;
16323
16324- childregs = ((struct pt_regs *)
16325- (THREAD_SIZE + task_stack_page(p))) - 1;
16326+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16327 *childregs = *regs;
16328
16329 childregs->ax = 0;
16330@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16331 p->thread.sp = (unsigned long) childregs;
16332 p->thread.sp0 = (unsigned long) (childregs+1);
16333 p->thread.usersp = me->thread.usersp;
16334+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16335
16336 set_tsk_thread_flag(p, TIF_FORK);
16337
16338@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16339 struct thread_struct *prev = &prev_p->thread;
16340 struct thread_struct *next = &next_p->thread;
16341 int cpu = smp_processor_id();
16342- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16343+ struct tss_struct *tss = init_tss + cpu;
16344 unsigned fsindex, gsindex;
16345 bool preload_fpu;
16346
16347@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16348 prev->usersp = percpu_read(old_rsp);
16349 percpu_write(old_rsp, next->usersp);
16350 percpu_write(current_task, next_p);
16351+ percpu_write(current_tinfo, &next_p->tinfo);
16352
16353- percpu_write(kernel_stack,
16354- (unsigned long)task_stack_page(next_p) +
16355- THREAD_SIZE - KERNEL_STACK_OFFSET);
16356+ percpu_write(kernel_stack, next->sp0);
16357
16358 /*
16359 * Now maybe reload the debug registers and handle I/O bitmaps
16360@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16361 if (!p || p == current || p->state == TASK_RUNNING)
16362 return 0;
16363 stack = (unsigned long)task_stack_page(p);
16364- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16365+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16366 return 0;
16367 fp = *(u64 *)(p->thread.sp);
16368 do {
16369- if (fp < (unsigned long)stack ||
16370- fp >= (unsigned long)stack+THREAD_SIZE)
16371+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16372 return 0;
16373 ip = *(u64 *)(fp+8);
16374 if (!in_sched_functions(ip))
16375diff -urNp linux-2.6.32.46/arch/x86/kernel/process.c linux-2.6.32.46/arch/x86/kernel/process.c
16376--- linux-2.6.32.46/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16377+++ linux-2.6.32.46/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16378@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16379
16380 void free_thread_info(struct thread_info *ti)
16381 {
16382- free_thread_xstate(ti->task);
16383 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16384 }
16385
16386+static struct kmem_cache *task_struct_cachep;
16387+
16388 void arch_task_cache_init(void)
16389 {
16390- task_xstate_cachep =
16391- kmem_cache_create("task_xstate", xstate_size,
16392+ /* create a slab on which task_structs can be allocated */
16393+ task_struct_cachep =
16394+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16395+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16396+
16397+ task_xstate_cachep =
16398+ kmem_cache_create("task_xstate", xstate_size,
16399 __alignof__(union thread_xstate),
16400- SLAB_PANIC | SLAB_NOTRACK, NULL);
16401+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16402+}
16403+
16404+struct task_struct *alloc_task_struct(void)
16405+{
16406+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16407+}
16408+
16409+void free_task_struct(struct task_struct *task)
16410+{
16411+ free_thread_xstate(task);
16412+ kmem_cache_free(task_struct_cachep, task);
16413 }
16414
16415 /*
16416@@ -73,7 +90,7 @@ void exit_thread(void)
16417 unsigned long *bp = t->io_bitmap_ptr;
16418
16419 if (bp) {
16420- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16421+ struct tss_struct *tss = init_tss + get_cpu();
16422
16423 t->io_bitmap_ptr = NULL;
16424 clear_thread_flag(TIF_IO_BITMAP);
16425@@ -93,6 +110,9 @@ void flush_thread(void)
16426
16427 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16428
16429+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16430+ loadsegment(gs, 0);
16431+#endif
16432 tsk->thread.debugreg0 = 0;
16433 tsk->thread.debugreg1 = 0;
16434 tsk->thread.debugreg2 = 0;
16435@@ -307,7 +327,7 @@ void default_idle(void)
16436 EXPORT_SYMBOL(default_idle);
16437 #endif
16438
16439-void stop_this_cpu(void *dummy)
16440+__noreturn void stop_this_cpu(void *dummy)
16441 {
16442 local_irq_disable();
16443 /*
16444@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16445 }
16446 early_param("idle", idle_setup);
16447
16448-unsigned long arch_align_stack(unsigned long sp)
16449+#ifdef CONFIG_PAX_RANDKSTACK
16450+asmlinkage void pax_randomize_kstack(void)
16451 {
16452- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16453- sp -= get_random_int() % 8192;
16454- return sp & ~0xf;
16455-}
16456+ struct thread_struct *thread = &current->thread;
16457+ unsigned long time;
16458
16459-unsigned long arch_randomize_brk(struct mm_struct *mm)
16460-{
16461- unsigned long range_end = mm->brk + 0x02000000;
16462- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16463+ if (!randomize_va_space)
16464+ return;
16465+
16466+ rdtscl(time);
16467+
16468+ /* P4 seems to return a 0 LSB, ignore it */
16469+#ifdef CONFIG_MPENTIUM4
16470+ time &= 0x3EUL;
16471+ time <<= 2;
16472+#elif defined(CONFIG_X86_64)
16473+ time &= 0xFUL;
16474+ time <<= 4;
16475+#else
16476+ time &= 0x1FUL;
16477+ time <<= 3;
16478+#endif
16479+
16480+ thread->sp0 ^= time;
16481+ load_sp0(init_tss + smp_processor_id(), thread);
16482+
16483+#ifdef CONFIG_X86_64
16484+ percpu_write(kernel_stack, thread->sp0);
16485+#endif
16486 }
16487+#endif
16488
16489diff -urNp linux-2.6.32.46/arch/x86/kernel/ptrace.c linux-2.6.32.46/arch/x86/kernel/ptrace.c
16490--- linux-2.6.32.46/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16491+++ linux-2.6.32.46/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16492@@ -925,7 +925,7 @@ static const struct user_regset_view use
16493 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16494 {
16495 int ret;
16496- unsigned long __user *datap = (unsigned long __user *)data;
16497+ unsigned long __user *datap = (__force unsigned long __user *)data;
16498
16499 switch (request) {
16500 /* read the word at location addr in the USER area. */
16501@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16502 if (addr < 0)
16503 return -EIO;
16504 ret = do_get_thread_area(child, addr,
16505- (struct user_desc __user *) data);
16506+ (__force struct user_desc __user *) data);
16507 break;
16508
16509 case PTRACE_SET_THREAD_AREA:
16510 if (addr < 0)
16511 return -EIO;
16512 ret = do_set_thread_area(child, addr,
16513- (struct user_desc __user *) data, 0);
16514+ (__force struct user_desc __user *) data, 0);
16515 break;
16516 #endif
16517
16518@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16519 #ifdef CONFIG_X86_PTRACE_BTS
16520 case PTRACE_BTS_CONFIG:
16521 ret = ptrace_bts_config
16522- (child, data, (struct ptrace_bts_config __user *)addr);
16523+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16524 break;
16525
16526 case PTRACE_BTS_STATUS:
16527 ret = ptrace_bts_status
16528- (child, data, (struct ptrace_bts_config __user *)addr);
16529+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16530 break;
16531
16532 case PTRACE_BTS_SIZE:
16533@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16534
16535 case PTRACE_BTS_GET:
16536 ret = ptrace_bts_read_record
16537- (child, data, (struct bts_struct __user *) addr);
16538+ (child, data, (__force struct bts_struct __user *) addr);
16539 break;
16540
16541 case PTRACE_BTS_CLEAR:
16542@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16543
16544 case PTRACE_BTS_DRAIN:
16545 ret = ptrace_bts_drain
16546- (child, data, (struct bts_struct __user *) addr);
16547+ (child, data, (__force struct bts_struct __user *) addr);
16548 break;
16549 #endif /* CONFIG_X86_PTRACE_BTS */
16550
16551@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16552 info.si_code = si_code;
16553
16554 /* User-mode ip? */
16555- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16556+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16557
16558 /* Send us the fake SIGTRAP */
16559 force_sig_info(SIGTRAP, &info, tsk);
16560@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16561 * We must return the syscall number to actually look up in the table.
16562 * This can be -1L to skip running any syscall at all.
16563 */
16564-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16565+long syscall_trace_enter(struct pt_regs *regs)
16566 {
16567 long ret = 0;
16568
16569@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16570 return ret ?: regs->orig_ax;
16571 }
16572
16573-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16574+void syscall_trace_leave(struct pt_regs *regs)
16575 {
16576 if (unlikely(current->audit_context))
16577 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16578diff -urNp linux-2.6.32.46/arch/x86/kernel/reboot.c linux-2.6.32.46/arch/x86/kernel/reboot.c
16579--- linux-2.6.32.46/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16580+++ linux-2.6.32.46/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16581@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16582 EXPORT_SYMBOL(pm_power_off);
16583
16584 static const struct desc_ptr no_idt = {};
16585-static int reboot_mode;
16586+static unsigned short reboot_mode;
16587 enum reboot_type reboot_type = BOOT_KBD;
16588 int reboot_force;
16589
16590@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16591 controller to pulse the CPU reset line, which is more thorough, but
16592 doesn't work with at least one type of 486 motherboard. It is easy
16593 to stop this code working; hence the copious comments. */
16594-static const unsigned long long
16595-real_mode_gdt_entries [3] =
16596+static struct desc_struct
16597+real_mode_gdt_entries [3] __read_only =
16598 {
16599- 0x0000000000000000ULL, /* Null descriptor */
16600- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16601- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16602+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16603+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16604+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16605 };
16606
16607 static const struct desc_ptr
16608@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16609 * specified by the code and length parameters.
16610 * We assume that length will always be less than 100!
16611 */
16612-void machine_real_restart(const unsigned char *code, int length)
16613+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16614 {
16615 local_irq_disable();
16616
16617@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16618 /* Remap the kernel at virtual address zero, as well as offset zero
16619 from the kernel segment. This assumes the kernel segment starts at
16620 virtual address PAGE_OFFSET. */
16621- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16622- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16623+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16624+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16625
16626 /*
16627 * Use `swapper_pg_dir' as our page directory.
16628@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16629 boot)". This seems like a fairly standard thing that gets set by
16630 REBOOT.COM programs, and the previous reset routine did this
16631 too. */
16632- *((unsigned short *)0x472) = reboot_mode;
16633+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16634
16635 /* For the switch to real mode, copy some code to low memory. It has
16636 to be in the first 64k because it is running in 16-bit mode, and it
16637 has to have the same physical and virtual address, because it turns
16638 off paging. Copy it near the end of the first page, out of the way
16639 of BIOS variables. */
16640- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16641- real_mode_switch, sizeof (real_mode_switch));
16642- memcpy((void *)(0x1000 - 100), code, length);
16643+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16644+ memcpy(__va(0x1000 - 100), code, length);
16645
16646 /* Set up the IDT for real mode. */
16647 load_idt(&real_mode_idt);
16648@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16649 __asm__ __volatile__ ("ljmp $0x0008,%0"
16650 :
16651 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16652+ do { } while (1);
16653 }
16654 #ifdef CONFIG_APM_MODULE
16655 EXPORT_SYMBOL(machine_real_restart);
16656@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16657 {
16658 }
16659
16660-static void native_machine_emergency_restart(void)
16661+__noreturn static void native_machine_emergency_restart(void)
16662 {
16663 int i;
16664
16665@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16666 #endif
16667 }
16668
16669-static void __machine_emergency_restart(int emergency)
16670+static __noreturn void __machine_emergency_restart(int emergency)
16671 {
16672 reboot_emergency = emergency;
16673 machine_ops.emergency_restart();
16674 }
16675
16676-static void native_machine_restart(char *__unused)
16677+static __noreturn void native_machine_restart(char *__unused)
16678 {
16679 printk("machine restart\n");
16680
16681@@ -674,7 +674,7 @@ static void native_machine_restart(char
16682 __machine_emergency_restart(0);
16683 }
16684
16685-static void native_machine_halt(void)
16686+static __noreturn void native_machine_halt(void)
16687 {
16688 /* stop other cpus and apics */
16689 machine_shutdown();
16690@@ -685,7 +685,7 @@ static void native_machine_halt(void)
16691 stop_this_cpu(NULL);
16692 }
16693
16694-static void native_machine_power_off(void)
16695+__noreturn static void native_machine_power_off(void)
16696 {
16697 if (pm_power_off) {
16698 if (!reboot_force)
16699@@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16700 }
16701 /* a fallback in case there is no PM info available */
16702 tboot_shutdown(TB_SHUTDOWN_HALT);
16703+ do { } while (1);
16704 }
16705
16706 struct machine_ops machine_ops = {
16707diff -urNp linux-2.6.32.46/arch/x86/kernel/setup.c linux-2.6.32.46/arch/x86/kernel/setup.c
16708--- linux-2.6.32.46/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16709+++ linux-2.6.32.46/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16710@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16711
16712 if (!boot_params.hdr.root_flags)
16713 root_mountflags &= ~MS_RDONLY;
16714- init_mm.start_code = (unsigned long) _text;
16715- init_mm.end_code = (unsigned long) _etext;
16716+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16717+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16718 init_mm.end_data = (unsigned long) _edata;
16719 init_mm.brk = _brk_end;
16720
16721- code_resource.start = virt_to_phys(_text);
16722- code_resource.end = virt_to_phys(_etext)-1;
16723- data_resource.start = virt_to_phys(_etext);
16724+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16725+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16726+ data_resource.start = virt_to_phys(_sdata);
16727 data_resource.end = virt_to_phys(_edata)-1;
16728 bss_resource.start = virt_to_phys(&__bss_start);
16729 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16730diff -urNp linux-2.6.32.46/arch/x86/kernel/setup_percpu.c linux-2.6.32.46/arch/x86/kernel/setup_percpu.c
16731--- linux-2.6.32.46/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16732+++ linux-2.6.32.46/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16733@@ -25,19 +25,17 @@
16734 # define DBG(x...)
16735 #endif
16736
16737-DEFINE_PER_CPU(int, cpu_number);
16738+#ifdef CONFIG_SMP
16739+DEFINE_PER_CPU(unsigned int, cpu_number);
16740 EXPORT_PER_CPU_SYMBOL(cpu_number);
16741+#endif
16742
16743-#ifdef CONFIG_X86_64
16744 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16745-#else
16746-#define BOOT_PERCPU_OFFSET 0
16747-#endif
16748
16749 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16750 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16751
16752-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16753+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16754 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16755 };
16756 EXPORT_SYMBOL(__per_cpu_offset);
16757@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16758 {
16759 #ifdef CONFIG_X86_32
16760 struct desc_struct gdt;
16761+ unsigned long base = per_cpu_offset(cpu);
16762
16763- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16764- 0x2 | DESCTYPE_S, 0x8);
16765- gdt.s = 1;
16766+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16767+ 0x83 | DESCTYPE_S, 0xC);
16768 write_gdt_entry(get_cpu_gdt_table(cpu),
16769 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16770 #endif
16771@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16772 /* alrighty, percpu areas up and running */
16773 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16774 for_each_possible_cpu(cpu) {
16775+#ifdef CONFIG_CC_STACKPROTECTOR
16776+#ifdef CONFIG_X86_32
16777+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16778+#endif
16779+#endif
16780 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16781 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16782 per_cpu(cpu_number, cpu) = cpu;
16783@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16784 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16785 #endif
16786 #endif
16787+#ifdef CONFIG_CC_STACKPROTECTOR
16788+#ifdef CONFIG_X86_32
16789+ if (!cpu)
16790+ per_cpu(stack_canary.canary, cpu) = canary;
16791+#endif
16792+#endif
16793 /*
16794 * Up to this point, the boot CPU has been using .data.init
16795 * area. Reload any changed state for the boot CPU.
16796diff -urNp linux-2.6.32.46/arch/x86/kernel/signal.c linux-2.6.32.46/arch/x86/kernel/signal.c
16797--- linux-2.6.32.46/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16798+++ linux-2.6.32.46/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16799@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16800 * Align the stack pointer according to the i386 ABI,
16801 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16802 */
16803- sp = ((sp + 4) & -16ul) - 4;
16804+ sp = ((sp - 12) & -16ul) - 4;
16805 #else /* !CONFIG_X86_32 */
16806 sp = round_down(sp, 16) - 8;
16807 #endif
16808@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16809 * Return an always-bogus address instead so we will die with SIGSEGV.
16810 */
16811 if (onsigstack && !likely(on_sig_stack(sp)))
16812- return (void __user *)-1L;
16813+ return (__force void __user *)-1L;
16814
16815 /* save i387 state */
16816 if (used_math() && save_i387_xstate(*fpstate) < 0)
16817- return (void __user *)-1L;
16818+ return (__force void __user *)-1L;
16819
16820 return (void __user *)sp;
16821 }
16822@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16823 }
16824
16825 if (current->mm->context.vdso)
16826- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16827+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16828 else
16829- restorer = &frame->retcode;
16830+ restorer = (void __user *)&frame->retcode;
16831 if (ka->sa.sa_flags & SA_RESTORER)
16832 restorer = ka->sa.sa_restorer;
16833
16834@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16835 * reasons and because gdb uses it as a signature to notice
16836 * signal handler stack frames.
16837 */
16838- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16839+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16840
16841 if (err)
16842 return -EFAULT;
16843@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16844 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16845
16846 /* Set up to return from userspace. */
16847- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16848+ if (current->mm->context.vdso)
16849+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16850+ else
16851+ restorer = (void __user *)&frame->retcode;
16852 if (ka->sa.sa_flags & SA_RESTORER)
16853 restorer = ka->sa.sa_restorer;
16854 put_user_ex(restorer, &frame->pretcode);
16855@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16856 * reasons and because gdb uses it as a signature to notice
16857 * signal handler stack frames.
16858 */
16859- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16860+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16861 } put_user_catch(err);
16862
16863 if (err)
16864@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16865 int signr;
16866 sigset_t *oldset;
16867
16868+ pax_track_stack();
16869+
16870 /*
16871 * We want the common case to go fast, which is why we may in certain
16872 * cases get here from kernel mode. Just return without doing anything
16873@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16874 * X86_32: vm86 regs switched out by assembly code before reaching
16875 * here, so testing against kernel CS suffices.
16876 */
16877- if (!user_mode(regs))
16878+ if (!user_mode_novm(regs))
16879 return;
16880
16881 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16882diff -urNp linux-2.6.32.46/arch/x86/kernel/smpboot.c linux-2.6.32.46/arch/x86/kernel/smpboot.c
16883--- linux-2.6.32.46/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16884+++ linux-2.6.32.46/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16885@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16886 */
16887 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16888
16889-void cpu_hotplug_driver_lock()
16890+void cpu_hotplug_driver_lock(void)
16891 {
16892- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16893+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16894 }
16895
16896-void cpu_hotplug_driver_unlock()
16897+void cpu_hotplug_driver_unlock(void)
16898 {
16899- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16900+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16901 }
16902
16903 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16904@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16905 * target processor state.
16906 */
16907 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16908- (unsigned long)stack_start.sp);
16909+ stack_start);
16910
16911 /*
16912 * Run STARTUP IPI loop.
16913@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16914 set_idle_for_cpu(cpu, c_idle.idle);
16915 do_rest:
16916 per_cpu(current_task, cpu) = c_idle.idle;
16917+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16918 #ifdef CONFIG_X86_32
16919 /* Stack for startup_32 can be just as for start_secondary onwards */
16920 irq_ctx_init(cpu);
16921@@ -750,13 +751,15 @@ do_rest:
16922 #else
16923 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16924 initial_gs = per_cpu_offset(cpu);
16925- per_cpu(kernel_stack, cpu) =
16926- (unsigned long)task_stack_page(c_idle.idle) -
16927- KERNEL_STACK_OFFSET + THREAD_SIZE;
16928+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16929 #endif
16930+
16931+ pax_open_kernel();
16932 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16933+ pax_close_kernel();
16934+
16935 initial_code = (unsigned long)start_secondary;
16936- stack_start.sp = (void *) c_idle.idle->thread.sp;
16937+ stack_start = c_idle.idle->thread.sp;
16938
16939 /* start_ip had better be page-aligned! */
16940 start_ip = setup_trampoline();
16941@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16942
16943 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16944
16945+#ifdef CONFIG_PAX_PER_CPU_PGD
16946+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16947+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16948+ KERNEL_PGD_PTRS);
16949+#endif
16950+
16951 err = do_boot_cpu(apicid, cpu);
16952
16953 if (err) {
16954diff -urNp linux-2.6.32.46/arch/x86/kernel/step.c linux-2.6.32.46/arch/x86/kernel/step.c
16955--- linux-2.6.32.46/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16956+++ linux-2.6.32.46/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16957@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16958 struct desc_struct *desc;
16959 unsigned long base;
16960
16961- seg &= ~7UL;
16962+ seg >>= 3;
16963
16964 mutex_lock(&child->mm->context.lock);
16965- if (unlikely((seg >> 3) >= child->mm->context.size))
16966+ if (unlikely(seg >= child->mm->context.size))
16967 addr = -1L; /* bogus selector, access would fault */
16968 else {
16969 desc = child->mm->context.ldt + seg;
16970@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16971 addr += base;
16972 }
16973 mutex_unlock(&child->mm->context.lock);
16974- }
16975+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16976+ addr = ktla_ktva(addr);
16977
16978 return addr;
16979 }
16980@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16981 unsigned char opcode[15];
16982 unsigned long addr = convert_ip_to_linear(child, regs);
16983
16984+ if (addr == -EINVAL)
16985+ return 0;
16986+
16987 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16988 for (i = 0; i < copied; i++) {
16989 switch (opcode[i]) {
16990@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16991
16992 #ifdef CONFIG_X86_64
16993 case 0x40 ... 0x4f:
16994- if (regs->cs != __USER_CS)
16995+ if ((regs->cs & 0xffff) != __USER_CS)
16996 /* 32-bit mode: register increment */
16997 return 0;
16998 /* 64-bit mode: REX prefix */
16999diff -urNp linux-2.6.32.46/arch/x86/kernel/syscall_table_32.S linux-2.6.32.46/arch/x86/kernel/syscall_table_32.S
17000--- linux-2.6.32.46/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
17001+++ linux-2.6.32.46/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
17002@@ -1,3 +1,4 @@
17003+.section .rodata,"a",@progbits
17004 ENTRY(sys_call_table)
17005 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17006 .long sys_exit
17007diff -urNp linux-2.6.32.46/arch/x86/kernel/sys_i386_32.c linux-2.6.32.46/arch/x86/kernel/sys_i386_32.c
17008--- linux-2.6.32.46/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
17009+++ linux-2.6.32.46/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
17010@@ -24,6 +24,21 @@
17011
17012 #include <asm/syscalls.h>
17013
17014+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17015+{
17016+ unsigned long pax_task_size = TASK_SIZE;
17017+
17018+#ifdef CONFIG_PAX_SEGMEXEC
17019+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17020+ pax_task_size = SEGMEXEC_TASK_SIZE;
17021+#endif
17022+
17023+ if (len > pax_task_size || addr > pax_task_size - len)
17024+ return -EINVAL;
17025+
17026+ return 0;
17027+}
17028+
17029 /*
17030 * Perform the select(nd, in, out, ex, tv) and mmap() system
17031 * calls. Linux/i386 didn't use to be able to handle more than
17032@@ -58,6 +73,212 @@ out:
17033 return err;
17034 }
17035
17036+unsigned long
17037+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17038+ unsigned long len, unsigned long pgoff, unsigned long flags)
17039+{
17040+ struct mm_struct *mm = current->mm;
17041+ struct vm_area_struct *vma;
17042+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17043+
17044+#ifdef CONFIG_PAX_SEGMEXEC
17045+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17046+ pax_task_size = SEGMEXEC_TASK_SIZE;
17047+#endif
17048+
17049+ pax_task_size -= PAGE_SIZE;
17050+
17051+ if (len > pax_task_size)
17052+ return -ENOMEM;
17053+
17054+ if (flags & MAP_FIXED)
17055+ return addr;
17056+
17057+#ifdef CONFIG_PAX_RANDMMAP
17058+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17059+#endif
17060+
17061+ if (addr) {
17062+ addr = PAGE_ALIGN(addr);
17063+ if (pax_task_size - len >= addr) {
17064+ vma = find_vma(mm, addr);
17065+ if (check_heap_stack_gap(vma, addr, len))
17066+ return addr;
17067+ }
17068+ }
17069+ if (len > mm->cached_hole_size) {
17070+ start_addr = addr = mm->free_area_cache;
17071+ } else {
17072+ start_addr = addr = mm->mmap_base;
17073+ mm->cached_hole_size = 0;
17074+ }
17075+
17076+#ifdef CONFIG_PAX_PAGEEXEC
17077+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17078+ start_addr = 0x00110000UL;
17079+
17080+#ifdef CONFIG_PAX_RANDMMAP
17081+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17082+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17083+#endif
17084+
17085+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17086+ start_addr = addr = mm->mmap_base;
17087+ else
17088+ addr = start_addr;
17089+ }
17090+#endif
17091+
17092+full_search:
17093+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17094+ /* At this point: (!vma || addr < vma->vm_end). */
17095+ if (pax_task_size - len < addr) {
17096+ /*
17097+ * Start a new search - just in case we missed
17098+ * some holes.
17099+ */
17100+ if (start_addr != mm->mmap_base) {
17101+ start_addr = addr = mm->mmap_base;
17102+ mm->cached_hole_size = 0;
17103+ goto full_search;
17104+ }
17105+ return -ENOMEM;
17106+ }
17107+ if (check_heap_stack_gap(vma, addr, len))
17108+ break;
17109+ if (addr + mm->cached_hole_size < vma->vm_start)
17110+ mm->cached_hole_size = vma->vm_start - addr;
17111+ addr = vma->vm_end;
17112+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17113+ start_addr = addr = mm->mmap_base;
17114+ mm->cached_hole_size = 0;
17115+ goto full_search;
17116+ }
17117+ }
17118+
17119+ /*
17120+ * Remember the place where we stopped the search:
17121+ */
17122+ mm->free_area_cache = addr + len;
17123+ return addr;
17124+}
17125+
17126+unsigned long
17127+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17128+ const unsigned long len, const unsigned long pgoff,
17129+ const unsigned long flags)
17130+{
17131+ struct vm_area_struct *vma;
17132+ struct mm_struct *mm = current->mm;
17133+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17134+
17135+#ifdef CONFIG_PAX_SEGMEXEC
17136+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17137+ pax_task_size = SEGMEXEC_TASK_SIZE;
17138+#endif
17139+
17140+ pax_task_size -= PAGE_SIZE;
17141+
17142+ /* requested length too big for entire address space */
17143+ if (len > pax_task_size)
17144+ return -ENOMEM;
17145+
17146+ if (flags & MAP_FIXED)
17147+ return addr;
17148+
17149+#ifdef CONFIG_PAX_PAGEEXEC
17150+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17151+ goto bottomup;
17152+#endif
17153+
17154+#ifdef CONFIG_PAX_RANDMMAP
17155+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17156+#endif
17157+
17158+ /* requesting a specific address */
17159+ if (addr) {
17160+ addr = PAGE_ALIGN(addr);
17161+ if (pax_task_size - len >= addr) {
17162+ vma = find_vma(mm, addr);
17163+ if (check_heap_stack_gap(vma, addr, len))
17164+ return addr;
17165+ }
17166+ }
17167+
17168+ /* check if free_area_cache is useful for us */
17169+ if (len <= mm->cached_hole_size) {
17170+ mm->cached_hole_size = 0;
17171+ mm->free_area_cache = mm->mmap_base;
17172+ }
17173+
17174+ /* either no address requested or can't fit in requested address hole */
17175+ addr = mm->free_area_cache;
17176+
17177+ /* make sure it can fit in the remaining address space */
17178+ if (addr > len) {
17179+ vma = find_vma(mm, addr-len);
17180+ if (check_heap_stack_gap(vma, addr - len, len))
17181+ /* remember the address as a hint for next time */
17182+ return (mm->free_area_cache = addr-len);
17183+ }
17184+
17185+ if (mm->mmap_base < len)
17186+ goto bottomup;
17187+
17188+ addr = mm->mmap_base-len;
17189+
17190+ do {
17191+ /*
17192+ * Lookup failure means no vma is above this address,
17193+ * else if new region fits below vma->vm_start,
17194+ * return with success:
17195+ */
17196+ vma = find_vma(mm, addr);
17197+ if (check_heap_stack_gap(vma, addr, len))
17198+ /* remember the address as a hint for next time */
17199+ return (mm->free_area_cache = addr);
17200+
17201+ /* remember the largest hole we saw so far */
17202+ if (addr + mm->cached_hole_size < vma->vm_start)
17203+ mm->cached_hole_size = vma->vm_start - addr;
17204+
17205+ /* try just below the current vma->vm_start */
17206+ addr = skip_heap_stack_gap(vma, len);
17207+ } while (!IS_ERR_VALUE(addr));
17208+
17209+bottomup:
17210+ /*
17211+ * A failed mmap() very likely causes application failure,
17212+ * so fall back to the bottom-up function here. This scenario
17213+ * can happen with large stack limits and large mmap()
17214+ * allocations.
17215+ */
17216+
17217+#ifdef CONFIG_PAX_SEGMEXEC
17218+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17219+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17220+ else
17221+#endif
17222+
17223+ mm->mmap_base = TASK_UNMAPPED_BASE;
17224+
17225+#ifdef CONFIG_PAX_RANDMMAP
17226+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17227+ mm->mmap_base += mm->delta_mmap;
17228+#endif
17229+
17230+ mm->free_area_cache = mm->mmap_base;
17231+ mm->cached_hole_size = ~0UL;
17232+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17233+ /*
17234+ * Restore the topdown base:
17235+ */
17236+ mm->mmap_base = base;
17237+ mm->free_area_cache = base;
17238+ mm->cached_hole_size = ~0UL;
17239+
17240+ return addr;
17241+}
17242
17243 struct sel_arg_struct {
17244 unsigned long n;
17245@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17246 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17247 case SEMTIMEDOP:
17248 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17249- (const struct timespec __user *)fifth);
17250+ (__force const struct timespec __user *)fifth);
17251
17252 case SEMGET:
17253 return sys_semget(first, second, third);
17254@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17255 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17256 if (ret)
17257 return ret;
17258- return put_user(raddr, (ulong __user *) third);
17259+ return put_user(raddr, (__force ulong __user *) third);
17260 }
17261 case 1: /* iBCS2 emulator entry point */
17262 if (!segment_eq(get_fs(), get_ds()))
17263@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17264
17265 return error;
17266 }
17267-
17268-
17269-/*
17270- * Do a system call from kernel instead of calling sys_execve so we
17271- * end up with proper pt_regs.
17272- */
17273-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17274-{
17275- long __res;
17276- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17277- : "=a" (__res)
17278- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17279- return __res;
17280-}
17281diff -urNp linux-2.6.32.46/arch/x86/kernel/sys_x86_64.c linux-2.6.32.46/arch/x86/kernel/sys_x86_64.c
17282--- linux-2.6.32.46/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17283+++ linux-2.6.32.46/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17284@@ -32,8 +32,8 @@ out:
17285 return error;
17286 }
17287
17288-static void find_start_end(unsigned long flags, unsigned long *begin,
17289- unsigned long *end)
17290+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17291+ unsigned long *begin, unsigned long *end)
17292 {
17293 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17294 unsigned long new_begin;
17295@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17296 *begin = new_begin;
17297 }
17298 } else {
17299- *begin = TASK_UNMAPPED_BASE;
17300+ *begin = mm->mmap_base;
17301 *end = TASK_SIZE;
17302 }
17303 }
17304@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17305 if (flags & MAP_FIXED)
17306 return addr;
17307
17308- find_start_end(flags, &begin, &end);
17309+ find_start_end(mm, flags, &begin, &end);
17310
17311 if (len > end)
17312 return -ENOMEM;
17313
17314+#ifdef CONFIG_PAX_RANDMMAP
17315+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17316+#endif
17317+
17318 if (addr) {
17319 addr = PAGE_ALIGN(addr);
17320 vma = find_vma(mm, addr);
17321- if (end - len >= addr &&
17322- (!vma || addr + len <= vma->vm_start))
17323+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17324 return addr;
17325 }
17326 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17327@@ -106,7 +109,7 @@ full_search:
17328 }
17329 return -ENOMEM;
17330 }
17331- if (!vma || addr + len <= vma->vm_start) {
17332+ if (check_heap_stack_gap(vma, addr, len)) {
17333 /*
17334 * Remember the place where we stopped the search:
17335 */
17336@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17337 {
17338 struct vm_area_struct *vma;
17339 struct mm_struct *mm = current->mm;
17340- unsigned long addr = addr0;
17341+ unsigned long base = mm->mmap_base, addr = addr0;
17342
17343 /* requested length too big for entire address space */
17344 if (len > TASK_SIZE)
17345@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17346 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17347 goto bottomup;
17348
17349+#ifdef CONFIG_PAX_RANDMMAP
17350+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17351+#endif
17352+
17353 /* requesting a specific address */
17354 if (addr) {
17355 addr = PAGE_ALIGN(addr);
17356- vma = find_vma(mm, addr);
17357- if (TASK_SIZE - len >= addr &&
17358- (!vma || addr + len <= vma->vm_start))
17359- return addr;
17360+ if (TASK_SIZE - len >= addr) {
17361+ vma = find_vma(mm, addr);
17362+ if (check_heap_stack_gap(vma, addr, len))
17363+ return addr;
17364+ }
17365 }
17366
17367 /* check if free_area_cache is useful for us */
17368@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17369 /* make sure it can fit in the remaining address space */
17370 if (addr > len) {
17371 vma = find_vma(mm, addr-len);
17372- if (!vma || addr <= vma->vm_start)
17373+ if (check_heap_stack_gap(vma, addr - len, len))
17374 /* remember the address as a hint for next time */
17375 return mm->free_area_cache = addr-len;
17376 }
17377@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17378 * return with success:
17379 */
17380 vma = find_vma(mm, addr);
17381- if (!vma || addr+len <= vma->vm_start)
17382+ if (check_heap_stack_gap(vma, addr, len))
17383 /* remember the address as a hint for next time */
17384 return mm->free_area_cache = addr;
17385
17386@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17387 mm->cached_hole_size = vma->vm_start - addr;
17388
17389 /* try just below the current vma->vm_start */
17390- addr = vma->vm_start-len;
17391- } while (len < vma->vm_start);
17392+ addr = skip_heap_stack_gap(vma, len);
17393+ } while (!IS_ERR_VALUE(addr));
17394
17395 bottomup:
17396 /*
17397@@ -198,13 +206,21 @@ bottomup:
17398 * can happen with large stack limits and large mmap()
17399 * allocations.
17400 */
17401+ mm->mmap_base = TASK_UNMAPPED_BASE;
17402+
17403+#ifdef CONFIG_PAX_RANDMMAP
17404+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17405+ mm->mmap_base += mm->delta_mmap;
17406+#endif
17407+
17408+ mm->free_area_cache = mm->mmap_base;
17409 mm->cached_hole_size = ~0UL;
17410- mm->free_area_cache = TASK_UNMAPPED_BASE;
17411 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17412 /*
17413 * Restore the topdown base:
17414 */
17415- mm->free_area_cache = mm->mmap_base;
17416+ mm->mmap_base = base;
17417+ mm->free_area_cache = base;
17418 mm->cached_hole_size = ~0UL;
17419
17420 return addr;
17421diff -urNp linux-2.6.32.46/arch/x86/kernel/tboot.c linux-2.6.32.46/arch/x86/kernel/tboot.c
17422--- linux-2.6.32.46/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17423+++ linux-2.6.32.46/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17424@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17425
17426 void tboot_shutdown(u32 shutdown_type)
17427 {
17428- void (*shutdown)(void);
17429+ void (* __noreturn shutdown)(void);
17430
17431 if (!tboot_enabled())
17432 return;
17433@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17434
17435 switch_to_tboot_pt();
17436
17437- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17438+ shutdown = (void *)tboot->shutdown_entry;
17439 shutdown();
17440
17441 /* should not reach here */
17442@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17443 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17444 }
17445
17446-static atomic_t ap_wfs_count;
17447+static atomic_unchecked_t ap_wfs_count;
17448
17449 static int tboot_wait_for_aps(int num_aps)
17450 {
17451@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17452 {
17453 switch (action) {
17454 case CPU_DYING:
17455- atomic_inc(&ap_wfs_count);
17456+ atomic_inc_unchecked(&ap_wfs_count);
17457 if (num_online_cpus() == 1)
17458- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17459+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17460 return NOTIFY_BAD;
17461 break;
17462 }
17463@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17464
17465 tboot_create_trampoline();
17466
17467- atomic_set(&ap_wfs_count, 0);
17468+ atomic_set_unchecked(&ap_wfs_count, 0);
17469 register_hotcpu_notifier(&tboot_cpu_notifier);
17470 return 0;
17471 }
17472diff -urNp linux-2.6.32.46/arch/x86/kernel/time.c linux-2.6.32.46/arch/x86/kernel/time.c
17473--- linux-2.6.32.46/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17474+++ linux-2.6.32.46/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17475@@ -26,17 +26,13 @@
17476 int timer_ack;
17477 #endif
17478
17479-#ifdef CONFIG_X86_64
17480-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17481-#endif
17482-
17483 unsigned long profile_pc(struct pt_regs *regs)
17484 {
17485 unsigned long pc = instruction_pointer(regs);
17486
17487- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17488+ if (!user_mode(regs) && in_lock_functions(pc)) {
17489 #ifdef CONFIG_FRAME_POINTER
17490- return *(unsigned long *)(regs->bp + sizeof(long));
17491+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17492 #else
17493 unsigned long *sp =
17494 (unsigned long *)kernel_stack_pointer(regs);
17495@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17496 * or above a saved flags. Eflags has bits 22-31 zero,
17497 * kernel addresses don't.
17498 */
17499+
17500+#ifdef CONFIG_PAX_KERNEXEC
17501+ return ktla_ktva(sp[0]);
17502+#else
17503 if (sp[0] >> 22)
17504 return sp[0];
17505 if (sp[1] >> 22)
17506 return sp[1];
17507 #endif
17508+
17509+#endif
17510 }
17511 return pc;
17512 }
17513diff -urNp linux-2.6.32.46/arch/x86/kernel/tls.c linux-2.6.32.46/arch/x86/kernel/tls.c
17514--- linux-2.6.32.46/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17515+++ linux-2.6.32.46/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17516@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17517 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17518 return -EINVAL;
17519
17520+#ifdef CONFIG_PAX_SEGMEXEC
17521+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17522+ return -EINVAL;
17523+#endif
17524+
17525 set_tls_desc(p, idx, &info, 1);
17526
17527 return 0;
17528diff -urNp linux-2.6.32.46/arch/x86/kernel/trampoline_32.S linux-2.6.32.46/arch/x86/kernel/trampoline_32.S
17529--- linux-2.6.32.46/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17530+++ linux-2.6.32.46/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17531@@ -32,6 +32,12 @@
17532 #include <asm/segment.h>
17533 #include <asm/page_types.h>
17534
17535+#ifdef CONFIG_PAX_KERNEXEC
17536+#define ta(X) (X)
17537+#else
17538+#define ta(X) ((X) - __PAGE_OFFSET)
17539+#endif
17540+
17541 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17542 __CPUINITRODATA
17543 .code16
17544@@ -60,7 +66,7 @@ r_base = .
17545 inc %ax # protected mode (PE) bit
17546 lmsw %ax # into protected mode
17547 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17548- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17549+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17550
17551 # These need to be in the same 64K segment as the above;
17552 # hence we don't use the boot_gdt_descr defined in head.S
17553diff -urNp linux-2.6.32.46/arch/x86/kernel/trampoline_64.S linux-2.6.32.46/arch/x86/kernel/trampoline_64.S
17554--- linux-2.6.32.46/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17555+++ linux-2.6.32.46/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17556@@ -91,7 +91,7 @@ startup_32:
17557 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17558 movl %eax, %ds
17559
17560- movl $X86_CR4_PAE, %eax
17561+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17562 movl %eax, %cr4 # Enable PAE mode
17563
17564 # Setup trampoline 4 level pagetables
17565@@ -127,7 +127,7 @@ startup_64:
17566 no_longmode:
17567 hlt
17568 jmp no_longmode
17569-#include "verify_cpu_64.S"
17570+#include "verify_cpu.S"
17571
17572 # Careful these need to be in the same 64K segment as the above;
17573 tidt:
17574@@ -138,7 +138,7 @@ tidt:
17575 # so the kernel can live anywhere
17576 .balign 4
17577 tgdt:
17578- .short tgdt_end - tgdt # gdt limit
17579+ .short tgdt_end - tgdt - 1 # gdt limit
17580 .long tgdt - r_base
17581 .short 0
17582 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17583diff -urNp linux-2.6.32.46/arch/x86/kernel/traps.c linux-2.6.32.46/arch/x86/kernel/traps.c
17584--- linux-2.6.32.46/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17585+++ linux-2.6.32.46/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17586@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17587
17588 /* Do we ignore FPU interrupts ? */
17589 char ignore_fpu_irq;
17590-
17591-/*
17592- * The IDT has to be page-aligned to simplify the Pentium
17593- * F0 0F bug workaround.
17594- */
17595-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17596 #endif
17597
17598 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17599@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17600 static inline void
17601 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17602 {
17603- if (!user_mode_vm(regs))
17604+ if (!user_mode(regs))
17605 die(str, regs, err);
17606 }
17607 #endif
17608
17609 static void __kprobes
17610-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17611+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17612 long error_code, siginfo_t *info)
17613 {
17614 struct task_struct *tsk = current;
17615
17616 #ifdef CONFIG_X86_32
17617- if (regs->flags & X86_VM_MASK) {
17618+ if (v8086_mode(regs)) {
17619 /*
17620 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17621 * On nmi (interrupt 2), do_trap should not be called.
17622@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17623 }
17624 #endif
17625
17626- if (!user_mode(regs))
17627+ if (!user_mode_novm(regs))
17628 goto kernel_trap;
17629
17630 #ifdef CONFIG_X86_32
17631@@ -158,7 +152,7 @@ trap_signal:
17632 printk_ratelimit()) {
17633 printk(KERN_INFO
17634 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17635- tsk->comm, tsk->pid, str,
17636+ tsk->comm, task_pid_nr(tsk), str,
17637 regs->ip, regs->sp, error_code);
17638 print_vma_addr(" in ", regs->ip);
17639 printk("\n");
17640@@ -175,8 +169,20 @@ kernel_trap:
17641 if (!fixup_exception(regs)) {
17642 tsk->thread.error_code = error_code;
17643 tsk->thread.trap_no = trapnr;
17644+
17645+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17646+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17647+ str = "PAX: suspicious stack segment fault";
17648+#endif
17649+
17650 die(str, regs, error_code);
17651 }
17652+
17653+#ifdef CONFIG_PAX_REFCOUNT
17654+ if (trapnr == 4)
17655+ pax_report_refcount_overflow(regs);
17656+#endif
17657+
17658 return;
17659
17660 #ifdef CONFIG_X86_32
17661@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17662 conditional_sti(regs);
17663
17664 #ifdef CONFIG_X86_32
17665- if (regs->flags & X86_VM_MASK)
17666+ if (v8086_mode(regs))
17667 goto gp_in_vm86;
17668 #endif
17669
17670 tsk = current;
17671- if (!user_mode(regs))
17672+ if (!user_mode_novm(regs))
17673 goto gp_in_kernel;
17674
17675+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17676+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17677+ struct mm_struct *mm = tsk->mm;
17678+ unsigned long limit;
17679+
17680+ down_write(&mm->mmap_sem);
17681+ limit = mm->context.user_cs_limit;
17682+ if (limit < TASK_SIZE) {
17683+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17684+ up_write(&mm->mmap_sem);
17685+ return;
17686+ }
17687+ up_write(&mm->mmap_sem);
17688+ }
17689+#endif
17690+
17691 tsk->thread.error_code = error_code;
17692 tsk->thread.trap_no = 13;
17693
17694@@ -305,6 +327,13 @@ gp_in_kernel:
17695 if (notify_die(DIE_GPF, "general protection fault", regs,
17696 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17697 return;
17698+
17699+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17700+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17701+ die("PAX: suspicious general protection fault", regs, error_code);
17702+ else
17703+#endif
17704+
17705 die("general protection fault", regs, error_code);
17706 }
17707
17708@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17709 dotraplinkage notrace __kprobes void
17710 do_nmi(struct pt_regs *regs, long error_code)
17711 {
17712+
17713+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17714+ if (!user_mode(regs)) {
17715+ unsigned long cs = regs->cs & 0xFFFF;
17716+ unsigned long ip = ktva_ktla(regs->ip);
17717+
17718+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17719+ regs->ip = ip;
17720+ }
17721+#endif
17722+
17723 nmi_enter();
17724
17725 inc_irq_stat(__nmi_count);
17726@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17727 }
17728
17729 #ifdef CONFIG_X86_32
17730- if (regs->flags & X86_VM_MASK)
17731+ if (v8086_mode(regs))
17732 goto debug_vm86;
17733 #endif
17734
17735@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17736 * kernel space (but re-enable TF when returning to user mode).
17737 */
17738 if (condition & DR_STEP) {
17739- if (!user_mode(regs))
17740+ if (!user_mode_novm(regs))
17741 goto clear_TF_reenable;
17742 }
17743
17744@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17745 * Handle strange cache flush from user space exception
17746 * in all other cases. This is undocumented behaviour.
17747 */
17748- if (regs->flags & X86_VM_MASK) {
17749+ if (v8086_mode(regs)) {
17750 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17751 return;
17752 }
17753@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17754 void __math_state_restore(void)
17755 {
17756 struct thread_info *thread = current_thread_info();
17757- struct task_struct *tsk = thread->task;
17758+ struct task_struct *tsk = current;
17759
17760 /*
17761 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17762@@ -825,8 +865,7 @@ void __math_state_restore(void)
17763 */
17764 asmlinkage void math_state_restore(void)
17765 {
17766- struct thread_info *thread = current_thread_info();
17767- struct task_struct *tsk = thread->task;
17768+ struct task_struct *tsk = current;
17769
17770 if (!tsk_used_math(tsk)) {
17771 local_irq_enable();
17772diff -urNp linux-2.6.32.46/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.46/arch/x86/kernel/verify_cpu_64.S
17773--- linux-2.6.32.46/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17774+++ linux-2.6.32.46/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17775@@ -1,105 +0,0 @@
17776-/*
17777- *
17778- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17779- * code has been borrowed from boot/setup.S and was introduced by
17780- * Andi Kleen.
17781- *
17782- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17783- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17784- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17785- *
17786- * This source code is licensed under the GNU General Public License,
17787- * Version 2. See the file COPYING for more details.
17788- *
17789- * This is a common code for verification whether CPU supports
17790- * long mode and SSE or not. It is not called directly instead this
17791- * file is included at various places and compiled in that context.
17792- * Following are the current usage.
17793- *
17794- * This file is included by both 16bit and 32bit code.
17795- *
17796- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17797- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17798- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17799- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17800- *
17801- * verify_cpu, returns the status of cpu check in register %eax.
17802- * 0: Success 1: Failure
17803- *
17804- * The caller needs to check for the error code and take the action
17805- * appropriately. Either display a message or halt.
17806- */
17807-
17808-#include <asm/cpufeature.h>
17809-
17810-verify_cpu:
17811- pushfl # Save caller passed flags
17812- pushl $0 # Kill any dangerous flags
17813- popfl
17814-
17815- pushfl # standard way to check for cpuid
17816- popl %eax
17817- movl %eax,%ebx
17818- xorl $0x200000,%eax
17819- pushl %eax
17820- popfl
17821- pushfl
17822- popl %eax
17823- cmpl %eax,%ebx
17824- jz verify_cpu_no_longmode # cpu has no cpuid
17825-
17826- movl $0x0,%eax # See if cpuid 1 is implemented
17827- cpuid
17828- cmpl $0x1,%eax
17829- jb verify_cpu_no_longmode # no cpuid 1
17830-
17831- xor %di,%di
17832- cmpl $0x68747541,%ebx # AuthenticAMD
17833- jnz verify_cpu_noamd
17834- cmpl $0x69746e65,%edx
17835- jnz verify_cpu_noamd
17836- cmpl $0x444d4163,%ecx
17837- jnz verify_cpu_noamd
17838- mov $1,%di # cpu is from AMD
17839-
17840-verify_cpu_noamd:
17841- movl $0x1,%eax # Does the cpu have what it takes
17842- cpuid
17843- andl $REQUIRED_MASK0,%edx
17844- xorl $REQUIRED_MASK0,%edx
17845- jnz verify_cpu_no_longmode
17846-
17847- movl $0x80000000,%eax # See if extended cpuid is implemented
17848- cpuid
17849- cmpl $0x80000001,%eax
17850- jb verify_cpu_no_longmode # no extended cpuid
17851-
17852- movl $0x80000001,%eax # Does the cpu have what it takes
17853- cpuid
17854- andl $REQUIRED_MASK1,%edx
17855- xorl $REQUIRED_MASK1,%edx
17856- jnz verify_cpu_no_longmode
17857-
17858-verify_cpu_sse_test:
17859- movl $1,%eax
17860- cpuid
17861- andl $SSE_MASK,%edx
17862- cmpl $SSE_MASK,%edx
17863- je verify_cpu_sse_ok
17864- test %di,%di
17865- jz verify_cpu_no_longmode # only try to force SSE on AMD
17866- movl $0xc0010015,%ecx # HWCR
17867- rdmsr
17868- btr $15,%eax # enable SSE
17869- wrmsr
17870- xor %di,%di # don't loop
17871- jmp verify_cpu_sse_test # try again
17872-
17873-verify_cpu_no_longmode:
17874- popfl # Restore caller passed flags
17875- movl $1,%eax
17876- ret
17877-verify_cpu_sse_ok:
17878- popfl # Restore caller passed flags
17879- xorl %eax, %eax
17880- ret
17881diff -urNp linux-2.6.32.46/arch/x86/kernel/verify_cpu.S linux-2.6.32.46/arch/x86/kernel/verify_cpu.S
17882--- linux-2.6.32.46/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17883+++ linux-2.6.32.46/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17884@@ -0,0 +1,140 @@
17885+/*
17886+ *
17887+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17888+ * code has been borrowed from boot/setup.S and was introduced by
17889+ * Andi Kleen.
17890+ *
17891+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17892+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17893+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17894+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17895+ *
17896+ * This source code is licensed under the GNU General Public License,
17897+ * Version 2. See the file COPYING for more details.
17898+ *
17899+ * This is a common code for verification whether CPU supports
17900+ * long mode and SSE or not. It is not called directly instead this
17901+ * file is included at various places and compiled in that context.
17902+ * This file is expected to run in 32bit code. Currently:
17903+ *
17904+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17905+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17906+ * arch/x86/kernel/head_32.S: processor startup
17907+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17908+ *
17909+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17910+ * 0: Success 1: Failure
17911+ *
17912+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17913+ *
17914+ * The caller needs to check for the error code and take the action
17915+ * appropriately. Either display a message or halt.
17916+ */
17917+
17918+#include <asm/cpufeature.h>
17919+#include <asm/msr-index.h>
17920+
17921+verify_cpu:
17922+ pushfl # Save caller passed flags
17923+ pushl $0 # Kill any dangerous flags
17924+ popfl
17925+
17926+ pushfl # standard way to check for cpuid
17927+ popl %eax
17928+ movl %eax,%ebx
17929+ xorl $0x200000,%eax
17930+ pushl %eax
17931+ popfl
17932+ pushfl
17933+ popl %eax
17934+ cmpl %eax,%ebx
17935+ jz verify_cpu_no_longmode # cpu has no cpuid
17936+
17937+ movl $0x0,%eax # See if cpuid 1 is implemented
17938+ cpuid
17939+ cmpl $0x1,%eax
17940+ jb verify_cpu_no_longmode # no cpuid 1
17941+
17942+ xor %di,%di
17943+ cmpl $0x68747541,%ebx # AuthenticAMD
17944+ jnz verify_cpu_noamd
17945+ cmpl $0x69746e65,%edx
17946+ jnz verify_cpu_noamd
17947+ cmpl $0x444d4163,%ecx
17948+ jnz verify_cpu_noamd
17949+ mov $1,%di # cpu is from AMD
17950+ jmp verify_cpu_check
17951+
17952+verify_cpu_noamd:
17953+ cmpl $0x756e6547,%ebx # GenuineIntel?
17954+ jnz verify_cpu_check
17955+ cmpl $0x49656e69,%edx
17956+ jnz verify_cpu_check
17957+ cmpl $0x6c65746e,%ecx
17958+ jnz verify_cpu_check
17959+
17960+ # only call IA32_MISC_ENABLE when:
17961+ # family > 6 || (family == 6 && model >= 0xd)
17962+ movl $0x1, %eax # check CPU family and model
17963+ cpuid
17964+ movl %eax, %ecx
17965+
17966+ andl $0x0ff00f00, %eax # mask family and extended family
17967+ shrl $8, %eax
17968+ cmpl $6, %eax
17969+ ja verify_cpu_clear_xd # family > 6, ok
17970+ jb verify_cpu_check # family < 6, skip
17971+
17972+ andl $0x000f00f0, %ecx # mask model and extended model
17973+ shrl $4, %ecx
17974+ cmpl $0xd, %ecx
17975+ jb verify_cpu_check # family == 6, model < 0xd, skip
17976+
17977+verify_cpu_clear_xd:
17978+ movl $MSR_IA32_MISC_ENABLE, %ecx
17979+ rdmsr
17980+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17981+ jnc verify_cpu_check # only write MSR if bit was changed
17982+ wrmsr
17983+
17984+verify_cpu_check:
17985+ movl $0x1,%eax # Does the cpu have what it takes
17986+ cpuid
17987+ andl $REQUIRED_MASK0,%edx
17988+ xorl $REQUIRED_MASK0,%edx
17989+ jnz verify_cpu_no_longmode
17990+
17991+ movl $0x80000000,%eax # See if extended cpuid is implemented
17992+ cpuid
17993+ cmpl $0x80000001,%eax
17994+ jb verify_cpu_no_longmode # no extended cpuid
17995+
17996+ movl $0x80000001,%eax # Does the cpu have what it takes
17997+ cpuid
17998+ andl $REQUIRED_MASK1,%edx
17999+ xorl $REQUIRED_MASK1,%edx
18000+ jnz verify_cpu_no_longmode
18001+
18002+verify_cpu_sse_test:
18003+ movl $1,%eax
18004+ cpuid
18005+ andl $SSE_MASK,%edx
18006+ cmpl $SSE_MASK,%edx
18007+ je verify_cpu_sse_ok
18008+ test %di,%di
18009+ jz verify_cpu_no_longmode # only try to force SSE on AMD
18010+ movl $MSR_K7_HWCR,%ecx
18011+ rdmsr
18012+ btr $15,%eax # enable SSE
18013+ wrmsr
18014+ xor %di,%di # don't loop
18015+ jmp verify_cpu_sse_test # try again
18016+
18017+verify_cpu_no_longmode:
18018+ popfl # Restore caller passed flags
18019+ movl $1,%eax
18020+ ret
18021+verify_cpu_sse_ok:
18022+ popfl # Restore caller passed flags
18023+ xorl %eax, %eax
18024+ ret
18025diff -urNp linux-2.6.32.46/arch/x86/kernel/vm86_32.c linux-2.6.32.46/arch/x86/kernel/vm86_32.c
18026--- linux-2.6.32.46/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
18027+++ linux-2.6.32.46/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
18028@@ -41,6 +41,7 @@
18029 #include <linux/ptrace.h>
18030 #include <linux/audit.h>
18031 #include <linux/stddef.h>
18032+#include <linux/grsecurity.h>
18033
18034 #include <asm/uaccess.h>
18035 #include <asm/io.h>
18036@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
18037 do_exit(SIGSEGV);
18038 }
18039
18040- tss = &per_cpu(init_tss, get_cpu());
18041+ tss = init_tss + get_cpu();
18042 current->thread.sp0 = current->thread.saved_sp0;
18043 current->thread.sysenter_cs = __KERNEL_CS;
18044 load_sp0(tss, &current->thread);
18045@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
18046 struct task_struct *tsk;
18047 int tmp, ret = -EPERM;
18048
18049+#ifdef CONFIG_GRKERNSEC_VM86
18050+ if (!capable(CAP_SYS_RAWIO)) {
18051+ gr_handle_vm86();
18052+ goto out;
18053+ }
18054+#endif
18055+
18056 tsk = current;
18057 if (tsk->thread.saved_sp0)
18058 goto out;
18059@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
18060 int tmp, ret;
18061 struct vm86plus_struct __user *v86;
18062
18063+#ifdef CONFIG_GRKERNSEC_VM86
18064+ if (!capable(CAP_SYS_RAWIO)) {
18065+ gr_handle_vm86();
18066+ ret = -EPERM;
18067+ goto out;
18068+ }
18069+#endif
18070+
18071 tsk = current;
18072 switch (regs->bx) {
18073 case VM86_REQUEST_IRQ:
18074@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18075 tsk->thread.saved_fs = info->regs32->fs;
18076 tsk->thread.saved_gs = get_user_gs(info->regs32);
18077
18078- tss = &per_cpu(init_tss, get_cpu());
18079+ tss = init_tss + get_cpu();
18080 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18081 if (cpu_has_sep)
18082 tsk->thread.sysenter_cs = 0;
18083@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18084 goto cannot_handle;
18085 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18086 goto cannot_handle;
18087- intr_ptr = (unsigned long __user *) (i << 2);
18088+ intr_ptr = (__force unsigned long __user *) (i << 2);
18089 if (get_user(segoffs, intr_ptr))
18090 goto cannot_handle;
18091 if ((segoffs >> 16) == BIOSSEG)
18092diff -urNp linux-2.6.32.46/arch/x86/kernel/vmi_32.c linux-2.6.32.46/arch/x86/kernel/vmi_32.c
18093--- linux-2.6.32.46/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
18094+++ linux-2.6.32.46/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
18095@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18096 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18097
18098 #define call_vrom_func(rom,func) \
18099- (((VROMFUNC *)(rom->func))())
18100+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
18101
18102 #define call_vrom_long_func(rom,func,arg) \
18103- (((VROMLONGFUNC *)(rom->func)) (arg))
18104+({\
18105+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18106+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18107+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18108+ __reloc;\
18109+})
18110
18111-static struct vrom_header *vmi_rom;
18112+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18113 static int disable_pge;
18114 static int disable_pse;
18115 static int disable_sep;
18116@@ -76,10 +81,10 @@ static struct {
18117 void (*set_initial_ap_state)(int, int);
18118 void (*halt)(void);
18119 void (*set_lazy_mode)(int mode);
18120-} vmi_ops;
18121+} __no_const vmi_ops __read_only;
18122
18123 /* Cached VMI operations */
18124-struct vmi_timer_ops vmi_timer_ops;
18125+struct vmi_timer_ops vmi_timer_ops __read_only;
18126
18127 /*
18128 * VMI patching routines.
18129@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18130 static inline void patch_offset(void *insnbuf,
18131 unsigned long ip, unsigned long dest)
18132 {
18133- *(unsigned long *)(insnbuf+1) = dest-ip-5;
18134+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
18135 }
18136
18137 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18138@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18139 {
18140 u64 reloc;
18141 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18142+
18143 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18144 switch(rel->type) {
18145 case VMI_RELOCATION_CALL_REL:
18146@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18147
18148 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18149 {
18150- const pte_t pte = { .pte = 0 };
18151+ const pte_t pte = __pte(0ULL);
18152 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18153 }
18154
18155 static void vmi_pmd_clear(pmd_t *pmd)
18156 {
18157- const pte_t pte = { .pte = 0 };
18158+ const pte_t pte = __pte(0ULL);
18159 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18160 }
18161 #endif
18162@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18163 ap.ss = __KERNEL_DS;
18164 ap.esp = (unsigned long) start_esp;
18165
18166- ap.ds = __USER_DS;
18167- ap.es = __USER_DS;
18168+ ap.ds = __KERNEL_DS;
18169+ ap.es = __KERNEL_DS;
18170 ap.fs = __KERNEL_PERCPU;
18171- ap.gs = __KERNEL_STACK_CANARY;
18172+ savesegment(gs, ap.gs);
18173
18174 ap.eflags = 0;
18175
18176@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18177 paravirt_leave_lazy_mmu();
18178 }
18179
18180+#ifdef CONFIG_PAX_KERNEXEC
18181+static unsigned long vmi_pax_open_kernel(void)
18182+{
18183+ return 0;
18184+}
18185+
18186+static unsigned long vmi_pax_close_kernel(void)
18187+{
18188+ return 0;
18189+}
18190+#endif
18191+
18192 static inline int __init check_vmi_rom(struct vrom_header *rom)
18193 {
18194 struct pci_header *pci;
18195@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18196 return 0;
18197 if (rom->vrom_signature != VMI_SIGNATURE)
18198 return 0;
18199+ if (rom->rom_length * 512 > sizeof(*rom)) {
18200+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18201+ return 0;
18202+ }
18203 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18204 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18205 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18206@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18207 struct vrom_header *romstart;
18208 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18209 if (check_vmi_rom(romstart)) {
18210- vmi_rom = romstart;
18211+ vmi_rom = *romstart;
18212 return 1;
18213 }
18214 }
18215@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18216
18217 para_fill(pv_irq_ops.safe_halt, Halt);
18218
18219+#ifdef CONFIG_PAX_KERNEXEC
18220+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18221+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18222+#endif
18223+
18224 /*
18225 * Alternative instruction rewriting doesn't happen soon enough
18226 * to convert VMI_IRET to a call instead of a jump; so we have
18227@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18228
18229 void __init vmi_init(void)
18230 {
18231- if (!vmi_rom)
18232+ if (!vmi_rom.rom_signature)
18233 probe_vmi_rom();
18234 else
18235- check_vmi_rom(vmi_rom);
18236+ check_vmi_rom(&vmi_rom);
18237
18238 /* In case probing for or validating the ROM failed, basil */
18239- if (!vmi_rom)
18240+ if (!vmi_rom.rom_signature)
18241 return;
18242
18243- reserve_top_address(-vmi_rom->virtual_top);
18244+ reserve_top_address(-vmi_rom.virtual_top);
18245
18246 #ifdef CONFIG_X86_IO_APIC
18247 /* This is virtual hardware; timer routing is wired correctly */
18248@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18249 {
18250 unsigned long flags;
18251
18252- if (!vmi_rom)
18253+ if (!vmi_rom.rom_signature)
18254 return;
18255
18256 local_irq_save(flags);
18257diff -urNp linux-2.6.32.46/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.46/arch/x86/kernel/vmlinux.lds.S
18258--- linux-2.6.32.46/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18259+++ linux-2.6.32.46/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18260@@ -26,6 +26,13 @@
18261 #include <asm/page_types.h>
18262 #include <asm/cache.h>
18263 #include <asm/boot.h>
18264+#include <asm/segment.h>
18265+
18266+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18267+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18268+#else
18269+#define __KERNEL_TEXT_OFFSET 0
18270+#endif
18271
18272 #undef i386 /* in case the preprocessor is a 32bit one */
18273
18274@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18275 #ifdef CONFIG_X86_32
18276 OUTPUT_ARCH(i386)
18277 ENTRY(phys_startup_32)
18278-jiffies = jiffies_64;
18279 #else
18280 OUTPUT_ARCH(i386:x86-64)
18281 ENTRY(phys_startup_64)
18282-jiffies_64 = jiffies;
18283 #endif
18284
18285 PHDRS {
18286 text PT_LOAD FLAGS(5); /* R_E */
18287- data PT_LOAD FLAGS(7); /* RWE */
18288+#ifdef CONFIG_X86_32
18289+ module PT_LOAD FLAGS(5); /* R_E */
18290+#endif
18291+#ifdef CONFIG_XEN
18292+ rodata PT_LOAD FLAGS(5); /* R_E */
18293+#else
18294+ rodata PT_LOAD FLAGS(4); /* R__ */
18295+#endif
18296+ data PT_LOAD FLAGS(6); /* RW_ */
18297 #ifdef CONFIG_X86_64
18298 user PT_LOAD FLAGS(5); /* R_E */
18299+#endif
18300+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18301 #ifdef CONFIG_SMP
18302 percpu PT_LOAD FLAGS(6); /* RW_ */
18303 #endif
18304+ text.init PT_LOAD FLAGS(5); /* R_E */
18305+ text.exit PT_LOAD FLAGS(5); /* R_E */
18306 init PT_LOAD FLAGS(7); /* RWE */
18307-#endif
18308 note PT_NOTE FLAGS(0); /* ___ */
18309 }
18310
18311 SECTIONS
18312 {
18313 #ifdef CONFIG_X86_32
18314- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18315- phys_startup_32 = startup_32 - LOAD_OFFSET;
18316+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18317 #else
18318- . = __START_KERNEL;
18319- phys_startup_64 = startup_64 - LOAD_OFFSET;
18320+ . = __START_KERNEL;
18321 #endif
18322
18323 /* Text and read-only data */
18324- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18325- _text = .;
18326+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18327 /* bootstrapping code */
18328+#ifdef CONFIG_X86_32
18329+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18330+#else
18331+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18332+#endif
18333+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18334+ _text = .;
18335 HEAD_TEXT
18336 #ifdef CONFIG_X86_32
18337 . = ALIGN(PAGE_SIZE);
18338@@ -82,28 +102,71 @@ SECTIONS
18339 IRQENTRY_TEXT
18340 *(.fixup)
18341 *(.gnu.warning)
18342- /* End of text section */
18343- _etext = .;
18344 } :text = 0x9090
18345
18346- NOTES :text :note
18347+ . += __KERNEL_TEXT_OFFSET;
18348+
18349+#ifdef CONFIG_X86_32
18350+ . = ALIGN(PAGE_SIZE);
18351+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18352+ *(.vmi.rom)
18353+ } :module
18354+
18355+ . = ALIGN(PAGE_SIZE);
18356+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18357+
18358+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18359+ MODULES_EXEC_VADDR = .;
18360+ BYTE(0)
18361+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18362+ . = ALIGN(HPAGE_SIZE);
18363+ MODULES_EXEC_END = . - 1;
18364+#endif
18365+
18366+ } :module
18367+#endif
18368
18369- EXCEPTION_TABLE(16) :text = 0x9090
18370+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18371+ /* End of text section */
18372+ _etext = . - __KERNEL_TEXT_OFFSET;
18373+ }
18374+
18375+#ifdef CONFIG_X86_32
18376+ . = ALIGN(PAGE_SIZE);
18377+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18378+ *(.idt)
18379+ . = ALIGN(PAGE_SIZE);
18380+ *(.empty_zero_page)
18381+ *(.swapper_pg_fixmap)
18382+ *(.swapper_pg_pmd)
18383+ *(.swapper_pg_dir)
18384+ *(.trampoline_pg_dir)
18385+ } :rodata
18386+#endif
18387+
18388+ . = ALIGN(PAGE_SIZE);
18389+ NOTES :rodata :note
18390+
18391+ EXCEPTION_TABLE(16) :rodata
18392
18393 RO_DATA(PAGE_SIZE)
18394
18395 /* Data */
18396 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18397+
18398+#ifdef CONFIG_PAX_KERNEXEC
18399+ . = ALIGN(HPAGE_SIZE);
18400+#else
18401+ . = ALIGN(PAGE_SIZE);
18402+#endif
18403+
18404 /* Start of data section */
18405 _sdata = .;
18406
18407 /* init_task */
18408 INIT_TASK_DATA(THREAD_SIZE)
18409
18410-#ifdef CONFIG_X86_32
18411- /* 32 bit has nosave before _edata */
18412 NOSAVE_DATA
18413-#endif
18414
18415 PAGE_ALIGNED_DATA(PAGE_SIZE)
18416
18417@@ -112,6 +175,8 @@ SECTIONS
18418 DATA_DATA
18419 CONSTRUCTORS
18420
18421+ jiffies = jiffies_64;
18422+
18423 /* rarely changed data like cpu maps */
18424 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18425
18426@@ -166,12 +231,6 @@ SECTIONS
18427 }
18428 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18429
18430- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18431- .jiffies : AT(VLOAD(.jiffies)) {
18432- *(.jiffies)
18433- }
18434- jiffies = VVIRT(.jiffies);
18435-
18436 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18437 *(.vsyscall_3)
18438 }
18439@@ -187,12 +246,19 @@ SECTIONS
18440 #endif /* CONFIG_X86_64 */
18441
18442 /* Init code and data - will be freed after init */
18443- . = ALIGN(PAGE_SIZE);
18444 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18445+ BYTE(0)
18446+
18447+#ifdef CONFIG_PAX_KERNEXEC
18448+ . = ALIGN(HPAGE_SIZE);
18449+#else
18450+ . = ALIGN(PAGE_SIZE);
18451+#endif
18452+
18453 __init_begin = .; /* paired with __init_end */
18454- }
18455+ } :init.begin
18456
18457-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18458+#ifdef CONFIG_SMP
18459 /*
18460 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18461 * output PHDR, so the next output section - .init.text - should
18462@@ -201,12 +267,27 @@ SECTIONS
18463 PERCPU_VADDR(0, :percpu)
18464 #endif
18465
18466- INIT_TEXT_SECTION(PAGE_SIZE)
18467-#ifdef CONFIG_X86_64
18468- :init
18469-#endif
18470+ . = ALIGN(PAGE_SIZE);
18471+ init_begin = .;
18472+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18473+ VMLINUX_SYMBOL(_sinittext) = .;
18474+ INIT_TEXT
18475+ VMLINUX_SYMBOL(_einittext) = .;
18476+ . = ALIGN(PAGE_SIZE);
18477+ } :text.init
18478
18479- INIT_DATA_SECTION(16)
18480+ /*
18481+ * .exit.text is discard at runtime, not link time, to deal with
18482+ * references from .altinstructions and .eh_frame
18483+ */
18484+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18485+ EXIT_TEXT
18486+ . = ALIGN(16);
18487+ } :text.exit
18488+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18489+
18490+ . = ALIGN(PAGE_SIZE);
18491+ INIT_DATA_SECTION(16) :init
18492
18493 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18494 __x86_cpu_dev_start = .;
18495@@ -232,19 +313,11 @@ SECTIONS
18496 *(.altinstr_replacement)
18497 }
18498
18499- /*
18500- * .exit.text is discard at runtime, not link time, to deal with
18501- * references from .altinstructions and .eh_frame
18502- */
18503- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18504- EXIT_TEXT
18505- }
18506-
18507 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18508 EXIT_DATA
18509 }
18510
18511-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18512+#ifndef CONFIG_SMP
18513 PERCPU(PAGE_SIZE)
18514 #endif
18515
18516@@ -267,12 +340,6 @@ SECTIONS
18517 . = ALIGN(PAGE_SIZE);
18518 }
18519
18520-#ifdef CONFIG_X86_64
18521- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18522- NOSAVE_DATA
18523- }
18524-#endif
18525-
18526 /* BSS */
18527 . = ALIGN(PAGE_SIZE);
18528 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18529@@ -288,6 +355,7 @@ SECTIONS
18530 __brk_base = .;
18531 . += 64 * 1024; /* 64k alignment slop space */
18532 *(.brk_reservation) /* areas brk users have reserved */
18533+ . = ALIGN(HPAGE_SIZE);
18534 __brk_limit = .;
18535 }
18536
18537@@ -316,13 +384,12 @@ SECTIONS
18538 * for the boot processor.
18539 */
18540 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18541-INIT_PER_CPU(gdt_page);
18542 INIT_PER_CPU(irq_stack_union);
18543
18544 /*
18545 * Build-time check on the image size:
18546 */
18547-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18548+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18549 "kernel image bigger than KERNEL_IMAGE_SIZE");
18550
18551 #ifdef CONFIG_SMP
18552diff -urNp linux-2.6.32.46/arch/x86/kernel/vsyscall_64.c linux-2.6.32.46/arch/x86/kernel/vsyscall_64.c
18553--- linux-2.6.32.46/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18554+++ linux-2.6.32.46/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18555@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18556
18557 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18558 /* copy vsyscall data */
18559+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18560 vsyscall_gtod_data.clock.vread = clock->vread;
18561 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18562 vsyscall_gtod_data.clock.mask = clock->mask;
18563@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18564 We do this here because otherwise user space would do it on
18565 its own in a likely inferior way (no access to jiffies).
18566 If you don't like it pass NULL. */
18567- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18568+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18569 p = tcache->blob[1];
18570 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18571 /* Load per CPU data from RDTSCP */
18572diff -urNp linux-2.6.32.46/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.46/arch/x86/kernel/x8664_ksyms_64.c
18573--- linux-2.6.32.46/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18574+++ linux-2.6.32.46/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18575@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18576
18577 EXPORT_SYMBOL(copy_user_generic);
18578 EXPORT_SYMBOL(__copy_user_nocache);
18579-EXPORT_SYMBOL(copy_from_user);
18580-EXPORT_SYMBOL(copy_to_user);
18581 EXPORT_SYMBOL(__copy_from_user_inatomic);
18582
18583 EXPORT_SYMBOL(copy_page);
18584diff -urNp linux-2.6.32.46/arch/x86/kernel/xsave.c linux-2.6.32.46/arch/x86/kernel/xsave.c
18585--- linux-2.6.32.46/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18586+++ linux-2.6.32.46/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18587@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18588 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18589 return -1;
18590
18591- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18592+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18593 fx_sw_user->extended_size -
18594 FP_XSTATE_MAGIC2_SIZE));
18595 /*
18596@@ -196,7 +196,7 @@ fx_only:
18597 * the other extended state.
18598 */
18599 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18600- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18601+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18602 }
18603
18604 /*
18605@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18606 if (task_thread_info(tsk)->status & TS_XSAVE)
18607 err = restore_user_xstate(buf);
18608 else
18609- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18610+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18611 buf);
18612 if (unlikely(err)) {
18613 /*
18614diff -urNp linux-2.6.32.46/arch/x86/kvm/emulate.c linux-2.6.32.46/arch/x86/kvm/emulate.c
18615--- linux-2.6.32.46/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18616+++ linux-2.6.32.46/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18617@@ -81,8 +81,8 @@
18618 #define Src2CL (1<<29)
18619 #define Src2ImmByte (2<<29)
18620 #define Src2One (3<<29)
18621-#define Src2Imm16 (4<<29)
18622-#define Src2Mask (7<<29)
18623+#define Src2Imm16 (4U<<29)
18624+#define Src2Mask (7U<<29)
18625
18626 enum {
18627 Group1_80, Group1_81, Group1_82, Group1_83,
18628@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18629
18630 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18631 do { \
18632+ unsigned long _tmp; \
18633 __asm__ __volatile__ ( \
18634 _PRE_EFLAGS("0", "4", "2") \
18635 _op _suffix " %"_x"3,%1; " \
18636@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18637 /* Raw emulation: instruction has two explicit operands. */
18638 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18639 do { \
18640- unsigned long _tmp; \
18641- \
18642 switch ((_dst).bytes) { \
18643 case 2: \
18644 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18645@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18646
18647 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18648 do { \
18649- unsigned long _tmp; \
18650 switch ((_dst).bytes) { \
18651 case 1: \
18652 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18653diff -urNp linux-2.6.32.46/arch/x86/kvm/lapic.c linux-2.6.32.46/arch/x86/kvm/lapic.c
18654--- linux-2.6.32.46/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18655+++ linux-2.6.32.46/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18656@@ -52,7 +52,7 @@
18657 #define APIC_BUS_CYCLE_NS 1
18658
18659 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18660-#define apic_debug(fmt, arg...)
18661+#define apic_debug(fmt, arg...) do {} while (0)
18662
18663 #define APIC_LVT_NUM 6
18664 /* 14 is the version for Xeon and Pentium 8.4.8*/
18665diff -urNp linux-2.6.32.46/arch/x86/kvm/paging_tmpl.h linux-2.6.32.46/arch/x86/kvm/paging_tmpl.h
18666--- linux-2.6.32.46/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18667+++ linux-2.6.32.46/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18668@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18669 int level = PT_PAGE_TABLE_LEVEL;
18670 unsigned long mmu_seq;
18671
18672+ pax_track_stack();
18673+
18674 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18675 kvm_mmu_audit(vcpu, "pre page fault");
18676
18677diff -urNp linux-2.6.32.46/arch/x86/kvm/svm.c linux-2.6.32.46/arch/x86/kvm/svm.c
18678--- linux-2.6.32.46/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18679+++ linux-2.6.32.46/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18680@@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18681 int cpu = raw_smp_processor_id();
18682
18683 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18684+
18685+ pax_open_kernel();
18686 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18687+ pax_close_kernel();
18688+
18689 load_TR_desc();
18690 }
18691
18692@@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18693 return true;
18694 }
18695
18696-static struct kvm_x86_ops svm_x86_ops = {
18697+static const struct kvm_x86_ops svm_x86_ops = {
18698 .cpu_has_kvm_support = has_svm,
18699 .disabled_by_bios = is_disabled,
18700 .hardware_setup = svm_hardware_setup,
18701diff -urNp linux-2.6.32.46/arch/x86/kvm/vmx.c linux-2.6.32.46/arch/x86/kvm/vmx.c
18702--- linux-2.6.32.46/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18703+++ linux-2.6.32.46/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18704@@ -570,7 +570,11 @@ static void reload_tss(void)
18705
18706 kvm_get_gdt(&gdt);
18707 descs = (void *)gdt.base;
18708+
18709+ pax_open_kernel();
18710 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18711+ pax_close_kernel();
18712+
18713 load_TR_desc();
18714 }
18715
18716@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18717 if (!cpu_has_vmx_flexpriority())
18718 flexpriority_enabled = 0;
18719
18720- if (!cpu_has_vmx_tpr_shadow())
18721- kvm_x86_ops->update_cr8_intercept = NULL;
18722+ if (!cpu_has_vmx_tpr_shadow()) {
18723+ pax_open_kernel();
18724+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18725+ pax_close_kernel();
18726+ }
18727
18728 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18729 kvm_disable_largepages();
18730@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18731 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18732
18733 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18734- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18735+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18736 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18737 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18738 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18739@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18740 "jmp .Lkvm_vmx_return \n\t"
18741 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18742 ".Lkvm_vmx_return: "
18743+
18744+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18745+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18746+ ".Lkvm_vmx_return2: "
18747+#endif
18748+
18749 /* Save guest registers, load host registers, keep flags */
18750 "xchg %0, (%%"R"sp) \n\t"
18751 "mov %%"R"ax, %c[rax](%0) \n\t"
18752@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18753 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18754 #endif
18755 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18756+
18757+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18758+ ,[cs]"i"(__KERNEL_CS)
18759+#endif
18760+
18761 : "cc", "memory"
18762- , R"bx", R"di", R"si"
18763+ , R"ax", R"bx", R"di", R"si"
18764 #ifdef CONFIG_X86_64
18765 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18766 #endif
18767@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18768 if (vmx->rmode.irq.pending)
18769 fixup_rmode_irq(vmx);
18770
18771- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18772+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18773+
18774+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18775+ loadsegment(fs, __KERNEL_PERCPU);
18776+#endif
18777+
18778+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18779+ __set_fs(current_thread_info()->addr_limit);
18780+#endif
18781+
18782 vmx->launched = 1;
18783
18784 vmx_complete_interrupts(vmx);
18785@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18786 return false;
18787 }
18788
18789-static struct kvm_x86_ops vmx_x86_ops = {
18790+static const struct kvm_x86_ops vmx_x86_ops = {
18791 .cpu_has_kvm_support = cpu_has_kvm_support,
18792 .disabled_by_bios = vmx_disabled_by_bios,
18793 .hardware_setup = hardware_setup,
18794diff -urNp linux-2.6.32.46/arch/x86/kvm/x86.c linux-2.6.32.46/arch/x86/kvm/x86.c
18795--- linux-2.6.32.46/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18796+++ linux-2.6.32.46/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18797@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18798 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18799 struct kvm_cpuid_entry2 __user *entries);
18800
18801-struct kvm_x86_ops *kvm_x86_ops;
18802+const struct kvm_x86_ops *kvm_x86_ops;
18803 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18804
18805 int ignore_msrs = 0;
18806@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18807 struct kvm_cpuid2 *cpuid,
18808 struct kvm_cpuid_entry2 __user *entries)
18809 {
18810- int r;
18811+ int r, i;
18812
18813 r = -E2BIG;
18814 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18815 goto out;
18816 r = -EFAULT;
18817- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18818- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18819+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18820 goto out;
18821+ for (i = 0; i < cpuid->nent; ++i) {
18822+ struct kvm_cpuid_entry2 cpuid_entry;
18823+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18824+ goto out;
18825+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18826+ }
18827 vcpu->arch.cpuid_nent = cpuid->nent;
18828 kvm_apic_set_version(vcpu);
18829 return 0;
18830@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18831 struct kvm_cpuid2 *cpuid,
18832 struct kvm_cpuid_entry2 __user *entries)
18833 {
18834- int r;
18835+ int r, i;
18836
18837 vcpu_load(vcpu);
18838 r = -E2BIG;
18839 if (cpuid->nent < vcpu->arch.cpuid_nent)
18840 goto out;
18841 r = -EFAULT;
18842- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18843- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18844+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18845 goto out;
18846+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18847+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18848+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18849+ goto out;
18850+ }
18851 return 0;
18852
18853 out:
18854@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18855 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18856 struct kvm_interrupt *irq)
18857 {
18858- if (irq->irq < 0 || irq->irq >= 256)
18859+ if (irq->irq >= 256)
18860 return -EINVAL;
18861 if (irqchip_in_kernel(vcpu->kvm))
18862 return -ENXIO;
18863@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18864 .notifier_call = kvmclock_cpufreq_notifier
18865 };
18866
18867-int kvm_arch_init(void *opaque)
18868+int kvm_arch_init(const void *opaque)
18869 {
18870 int r, cpu;
18871- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18872+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18873
18874 if (kvm_x86_ops) {
18875 printk(KERN_ERR "kvm: already loaded the other module\n");
18876diff -urNp linux-2.6.32.46/arch/x86/lguest/boot.c linux-2.6.32.46/arch/x86/lguest/boot.c
18877--- linux-2.6.32.46/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18878+++ linux-2.6.32.46/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18879@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18880 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18881 * Launcher to reboot us.
18882 */
18883-static void lguest_restart(char *reason)
18884+static __noreturn void lguest_restart(char *reason)
18885 {
18886 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18887+ BUG();
18888 }
18889
18890 /*G:050
18891diff -urNp linux-2.6.32.46/arch/x86/lib/atomic64_32.c linux-2.6.32.46/arch/x86/lib/atomic64_32.c
18892--- linux-2.6.32.46/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18893+++ linux-2.6.32.46/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18894@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18895 }
18896 EXPORT_SYMBOL(atomic64_cmpxchg);
18897
18898+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18899+{
18900+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18901+}
18902+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18903+
18904 /**
18905 * atomic64_xchg - xchg atomic64 variable
18906 * @ptr: pointer to type atomic64_t
18907@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18908 EXPORT_SYMBOL(atomic64_xchg);
18909
18910 /**
18911+ * atomic64_xchg_unchecked - xchg atomic64 variable
18912+ * @ptr: pointer to type atomic64_unchecked_t
18913+ * @new_val: value to assign
18914+ *
18915+ * Atomically xchgs the value of @ptr to @new_val and returns
18916+ * the old value.
18917+ */
18918+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18919+{
18920+ /*
18921+ * Try first with a (possibly incorrect) assumption about
18922+ * what we have there. We'll do two loops most likely,
18923+ * but we'll get an ownership MESI transaction straight away
18924+ * instead of a read transaction followed by a
18925+ * flush-for-ownership transaction:
18926+ */
18927+ u64 old_val, real_val = 0;
18928+
18929+ do {
18930+ old_val = real_val;
18931+
18932+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18933+
18934+ } while (real_val != old_val);
18935+
18936+ return old_val;
18937+}
18938+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18939+
18940+/**
18941 * atomic64_set - set atomic64 variable
18942 * @ptr: pointer to type atomic64_t
18943 * @new_val: value to assign
18944@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18945 EXPORT_SYMBOL(atomic64_set);
18946
18947 /**
18948-EXPORT_SYMBOL(atomic64_read);
18949+ * atomic64_unchecked_set - set atomic64 variable
18950+ * @ptr: pointer to type atomic64_unchecked_t
18951+ * @new_val: value to assign
18952+ *
18953+ * Atomically sets the value of @ptr to @new_val.
18954+ */
18955+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18956+{
18957+ atomic64_xchg_unchecked(ptr, new_val);
18958+}
18959+EXPORT_SYMBOL(atomic64_set_unchecked);
18960+
18961+/**
18962 * atomic64_add_return - add and return
18963 * @delta: integer value to add
18964 * @ptr: pointer to type atomic64_t
18965@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18966 }
18967 EXPORT_SYMBOL(atomic64_add_return);
18968
18969+/**
18970+ * atomic64_add_return_unchecked - add and return
18971+ * @delta: integer value to add
18972+ * @ptr: pointer to type atomic64_unchecked_t
18973+ *
18974+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18975+ */
18976+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18977+{
18978+ /*
18979+ * Try first with a (possibly incorrect) assumption about
18980+ * what we have there. We'll do two loops most likely,
18981+ * but we'll get an ownership MESI transaction straight away
18982+ * instead of a read transaction followed by a
18983+ * flush-for-ownership transaction:
18984+ */
18985+ u64 old_val, new_val, real_val = 0;
18986+
18987+ do {
18988+ old_val = real_val;
18989+ new_val = old_val + delta;
18990+
18991+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18992+
18993+ } while (real_val != old_val);
18994+
18995+ return new_val;
18996+}
18997+EXPORT_SYMBOL(atomic64_add_return_unchecked);
18998+
18999 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
19000 {
19001 return atomic64_add_return(-delta, ptr);
19002 }
19003 EXPORT_SYMBOL(atomic64_sub_return);
19004
19005+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19006+{
19007+ return atomic64_add_return_unchecked(-delta, ptr);
19008+}
19009+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
19010+
19011 u64 atomic64_inc_return(atomic64_t *ptr)
19012 {
19013 return atomic64_add_return(1, ptr);
19014 }
19015 EXPORT_SYMBOL(atomic64_inc_return);
19016
19017+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
19018+{
19019+ return atomic64_add_return_unchecked(1, ptr);
19020+}
19021+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
19022+
19023 u64 atomic64_dec_return(atomic64_t *ptr)
19024 {
19025 return atomic64_sub_return(1, ptr);
19026 }
19027 EXPORT_SYMBOL(atomic64_dec_return);
19028
19029+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
19030+{
19031+ return atomic64_sub_return_unchecked(1, ptr);
19032+}
19033+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
19034+
19035 /**
19036 * atomic64_add - add integer to atomic64 variable
19037 * @delta: integer value to add
19038@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
19039 EXPORT_SYMBOL(atomic64_add);
19040
19041 /**
19042+ * atomic64_add_unchecked - add integer to atomic64 variable
19043+ * @delta: integer value to add
19044+ * @ptr: pointer to type atomic64_unchecked_t
19045+ *
19046+ * Atomically adds @delta to @ptr.
19047+ */
19048+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19049+{
19050+ atomic64_add_return_unchecked(delta, ptr);
19051+}
19052+EXPORT_SYMBOL(atomic64_add_unchecked);
19053+
19054+/**
19055 * atomic64_sub - subtract the atomic64 variable
19056 * @delta: integer value to subtract
19057 * @ptr: pointer to type atomic64_t
19058@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
19059 EXPORT_SYMBOL(atomic64_sub);
19060
19061 /**
19062+ * atomic64_sub_unchecked - subtract the atomic64 variable
19063+ * @delta: integer value to subtract
19064+ * @ptr: pointer to type atomic64_unchecked_t
19065+ *
19066+ * Atomically subtracts @delta from @ptr.
19067+ */
19068+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19069+{
19070+ atomic64_add_unchecked(-delta, ptr);
19071+}
19072+EXPORT_SYMBOL(atomic64_sub_unchecked);
19073+
19074+/**
19075 * atomic64_sub_and_test - subtract value from variable and test result
19076 * @delta: integer value to subtract
19077 * @ptr: pointer to type atomic64_t
19078@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19079 EXPORT_SYMBOL(atomic64_inc);
19080
19081 /**
19082+ * atomic64_inc_unchecked - increment atomic64 variable
19083+ * @ptr: pointer to type atomic64_unchecked_t
19084+ *
19085+ * Atomically increments @ptr by 1.
19086+ */
19087+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19088+{
19089+ atomic64_add_unchecked(1, ptr);
19090+}
19091+EXPORT_SYMBOL(atomic64_inc_unchecked);
19092+
19093+/**
19094 * atomic64_dec - decrement atomic64 variable
19095 * @ptr: pointer to type atomic64_t
19096 *
19097@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19098 EXPORT_SYMBOL(atomic64_dec);
19099
19100 /**
19101+ * atomic64_dec_unchecked - decrement atomic64 variable
19102+ * @ptr: pointer to type atomic64_unchecked_t
19103+ *
19104+ * Atomically decrements @ptr by 1.
19105+ */
19106+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19107+{
19108+ atomic64_sub_unchecked(1, ptr);
19109+}
19110+EXPORT_SYMBOL(atomic64_dec_unchecked);
19111+
19112+/**
19113 * atomic64_dec_and_test - decrement and test
19114 * @ptr: pointer to type atomic64_t
19115 *
19116diff -urNp linux-2.6.32.46/arch/x86/lib/checksum_32.S linux-2.6.32.46/arch/x86/lib/checksum_32.S
19117--- linux-2.6.32.46/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
19118+++ linux-2.6.32.46/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
19119@@ -28,7 +28,8 @@
19120 #include <linux/linkage.h>
19121 #include <asm/dwarf2.h>
19122 #include <asm/errno.h>
19123-
19124+#include <asm/segment.h>
19125+
19126 /*
19127 * computes a partial checksum, e.g. for TCP/UDP fragments
19128 */
19129@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19130
19131 #define ARGBASE 16
19132 #define FP 12
19133-
19134-ENTRY(csum_partial_copy_generic)
19135+
19136+ENTRY(csum_partial_copy_generic_to_user)
19137 CFI_STARTPROC
19138+
19139+#ifdef CONFIG_PAX_MEMORY_UDEREF
19140+ pushl %gs
19141+ CFI_ADJUST_CFA_OFFSET 4
19142+ popl %es
19143+ CFI_ADJUST_CFA_OFFSET -4
19144+ jmp csum_partial_copy_generic
19145+#endif
19146+
19147+ENTRY(csum_partial_copy_generic_from_user)
19148+
19149+#ifdef CONFIG_PAX_MEMORY_UDEREF
19150+ pushl %gs
19151+ CFI_ADJUST_CFA_OFFSET 4
19152+ popl %ds
19153+ CFI_ADJUST_CFA_OFFSET -4
19154+#endif
19155+
19156+ENTRY(csum_partial_copy_generic)
19157 subl $4,%esp
19158 CFI_ADJUST_CFA_OFFSET 4
19159 pushl %edi
19160@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19161 jmp 4f
19162 SRC(1: movw (%esi), %bx )
19163 addl $2, %esi
19164-DST( movw %bx, (%edi) )
19165+DST( movw %bx, %es:(%edi) )
19166 addl $2, %edi
19167 addw %bx, %ax
19168 adcl $0, %eax
19169@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19170 SRC(1: movl (%esi), %ebx )
19171 SRC( movl 4(%esi), %edx )
19172 adcl %ebx, %eax
19173-DST( movl %ebx, (%edi) )
19174+DST( movl %ebx, %es:(%edi) )
19175 adcl %edx, %eax
19176-DST( movl %edx, 4(%edi) )
19177+DST( movl %edx, %es:4(%edi) )
19178
19179 SRC( movl 8(%esi), %ebx )
19180 SRC( movl 12(%esi), %edx )
19181 adcl %ebx, %eax
19182-DST( movl %ebx, 8(%edi) )
19183+DST( movl %ebx, %es:8(%edi) )
19184 adcl %edx, %eax
19185-DST( movl %edx, 12(%edi) )
19186+DST( movl %edx, %es:12(%edi) )
19187
19188 SRC( movl 16(%esi), %ebx )
19189 SRC( movl 20(%esi), %edx )
19190 adcl %ebx, %eax
19191-DST( movl %ebx, 16(%edi) )
19192+DST( movl %ebx, %es:16(%edi) )
19193 adcl %edx, %eax
19194-DST( movl %edx, 20(%edi) )
19195+DST( movl %edx, %es:20(%edi) )
19196
19197 SRC( movl 24(%esi), %ebx )
19198 SRC( movl 28(%esi), %edx )
19199 adcl %ebx, %eax
19200-DST( movl %ebx, 24(%edi) )
19201+DST( movl %ebx, %es:24(%edi) )
19202 adcl %edx, %eax
19203-DST( movl %edx, 28(%edi) )
19204+DST( movl %edx, %es:28(%edi) )
19205
19206 lea 32(%esi), %esi
19207 lea 32(%edi), %edi
19208@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19209 shrl $2, %edx # This clears CF
19210 SRC(3: movl (%esi), %ebx )
19211 adcl %ebx, %eax
19212-DST( movl %ebx, (%edi) )
19213+DST( movl %ebx, %es:(%edi) )
19214 lea 4(%esi), %esi
19215 lea 4(%edi), %edi
19216 dec %edx
19217@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19218 jb 5f
19219 SRC( movw (%esi), %cx )
19220 leal 2(%esi), %esi
19221-DST( movw %cx, (%edi) )
19222+DST( movw %cx, %es:(%edi) )
19223 leal 2(%edi), %edi
19224 je 6f
19225 shll $16,%ecx
19226 SRC(5: movb (%esi), %cl )
19227-DST( movb %cl, (%edi) )
19228+DST( movb %cl, %es:(%edi) )
19229 6: addl %ecx, %eax
19230 adcl $0, %eax
19231 7:
19232@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19233
19234 6001:
19235 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19236- movl $-EFAULT, (%ebx)
19237+ movl $-EFAULT, %ss:(%ebx)
19238
19239 # zero the complete destination - computing the rest
19240 # is too much work
19241@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19242
19243 6002:
19244 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19245- movl $-EFAULT,(%ebx)
19246+ movl $-EFAULT,%ss:(%ebx)
19247 jmp 5000b
19248
19249 .previous
19250
19251+ pushl %ss
19252+ CFI_ADJUST_CFA_OFFSET 4
19253+ popl %ds
19254+ CFI_ADJUST_CFA_OFFSET -4
19255+ pushl %ss
19256+ CFI_ADJUST_CFA_OFFSET 4
19257+ popl %es
19258+ CFI_ADJUST_CFA_OFFSET -4
19259 popl %ebx
19260 CFI_ADJUST_CFA_OFFSET -4
19261 CFI_RESTORE ebx
19262@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19263 CFI_ADJUST_CFA_OFFSET -4
19264 ret
19265 CFI_ENDPROC
19266-ENDPROC(csum_partial_copy_generic)
19267+ENDPROC(csum_partial_copy_generic_to_user)
19268
19269 #else
19270
19271 /* Version for PentiumII/PPro */
19272
19273 #define ROUND1(x) \
19274+ nop; nop; nop; \
19275 SRC(movl x(%esi), %ebx ) ; \
19276 addl %ebx, %eax ; \
19277- DST(movl %ebx, x(%edi) ) ;
19278+ DST(movl %ebx, %es:x(%edi)) ;
19279
19280 #define ROUND(x) \
19281+ nop; nop; nop; \
19282 SRC(movl x(%esi), %ebx ) ; \
19283 adcl %ebx, %eax ; \
19284- DST(movl %ebx, x(%edi) ) ;
19285+ DST(movl %ebx, %es:x(%edi)) ;
19286
19287 #define ARGBASE 12
19288-
19289-ENTRY(csum_partial_copy_generic)
19290+
19291+ENTRY(csum_partial_copy_generic_to_user)
19292 CFI_STARTPROC
19293+
19294+#ifdef CONFIG_PAX_MEMORY_UDEREF
19295+ pushl %gs
19296+ CFI_ADJUST_CFA_OFFSET 4
19297+ popl %es
19298+ CFI_ADJUST_CFA_OFFSET -4
19299+ jmp csum_partial_copy_generic
19300+#endif
19301+
19302+ENTRY(csum_partial_copy_generic_from_user)
19303+
19304+#ifdef CONFIG_PAX_MEMORY_UDEREF
19305+ pushl %gs
19306+ CFI_ADJUST_CFA_OFFSET 4
19307+ popl %ds
19308+ CFI_ADJUST_CFA_OFFSET -4
19309+#endif
19310+
19311+ENTRY(csum_partial_copy_generic)
19312 pushl %ebx
19313 CFI_ADJUST_CFA_OFFSET 4
19314 CFI_REL_OFFSET ebx, 0
19315@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19316 subl %ebx, %edi
19317 lea -1(%esi),%edx
19318 andl $-32,%edx
19319- lea 3f(%ebx,%ebx), %ebx
19320+ lea 3f(%ebx,%ebx,2), %ebx
19321 testl %esi, %esi
19322 jmp *%ebx
19323 1: addl $64,%esi
19324@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19325 jb 5f
19326 SRC( movw (%esi), %dx )
19327 leal 2(%esi), %esi
19328-DST( movw %dx, (%edi) )
19329+DST( movw %dx, %es:(%edi) )
19330 leal 2(%edi), %edi
19331 je 6f
19332 shll $16,%edx
19333 5:
19334 SRC( movb (%esi), %dl )
19335-DST( movb %dl, (%edi) )
19336+DST( movb %dl, %es:(%edi) )
19337 6: addl %edx, %eax
19338 adcl $0, %eax
19339 7:
19340 .section .fixup, "ax"
19341 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19342- movl $-EFAULT, (%ebx)
19343+ movl $-EFAULT, %ss:(%ebx)
19344 # zero the complete destination (computing the rest is too much work)
19345 movl ARGBASE+8(%esp),%edi # dst
19346 movl ARGBASE+12(%esp),%ecx # len
19347@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19348 rep; stosb
19349 jmp 7b
19350 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19351- movl $-EFAULT, (%ebx)
19352+ movl $-EFAULT, %ss:(%ebx)
19353 jmp 7b
19354 .previous
19355
19356+#ifdef CONFIG_PAX_MEMORY_UDEREF
19357+ pushl %ss
19358+ CFI_ADJUST_CFA_OFFSET 4
19359+ popl %ds
19360+ CFI_ADJUST_CFA_OFFSET -4
19361+ pushl %ss
19362+ CFI_ADJUST_CFA_OFFSET 4
19363+ popl %es
19364+ CFI_ADJUST_CFA_OFFSET -4
19365+#endif
19366+
19367 popl %esi
19368 CFI_ADJUST_CFA_OFFSET -4
19369 CFI_RESTORE esi
19370@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19371 CFI_RESTORE ebx
19372 ret
19373 CFI_ENDPROC
19374-ENDPROC(csum_partial_copy_generic)
19375+ENDPROC(csum_partial_copy_generic_to_user)
19376
19377 #undef ROUND
19378 #undef ROUND1
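The UDEREF changes in this file only redirect the destination stores through %es (loaded from %gs for the _to_user entry point); the arithmetic is still the ordinary 16-bit ones'-complement Internet checksum. For reference, a simplified portable version of that sum, assuming RFC 1071 byte ordering rather than the x86 register layout, with the copy and exception fixups omitted:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
	{
		while (len > 1) {			/* sum 16-bit words */
			sum += (uint32_t)buf[0] << 8 | buf[1];
			buf += 2;
			len -= 2;
		}
		if (len)				/* trailing odd byte */
			sum += (uint32_t)buf[0] << 8;
		return sum;
	}

	static uint16_t csum_fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

		printf("%#06x\n", (unsigned)csum_fold(csum_partial(data, sizeof(data), 0)));
		return 0;
	}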
19379diff -urNp linux-2.6.32.46/arch/x86/lib/clear_page_64.S linux-2.6.32.46/arch/x86/lib/clear_page_64.S
19380--- linux-2.6.32.46/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19381+++ linux-2.6.32.46/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19382@@ -43,7 +43,7 @@ ENDPROC(clear_page)
19383
19384 #include <asm/cpufeature.h>
19385
19386- .section .altinstr_replacement,"ax"
19387+ .section .altinstr_replacement,"a"
19388 1: .byte 0xeb /* jmp <disp8> */
19389 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19390 2:
19391diff -urNp linux-2.6.32.46/arch/x86/lib/copy_page_64.S linux-2.6.32.46/arch/x86/lib/copy_page_64.S
19392--- linux-2.6.32.46/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19393+++ linux-2.6.32.46/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19394@@ -104,7 +104,7 @@ ENDPROC(copy_page)
19395
19396 #include <asm/cpufeature.h>
19397
19398- .section .altinstr_replacement,"ax"
19399+ .section .altinstr_replacement,"a"
19400 1: .byte 0xeb /* jmp <disp8> */
19401 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19402 2:
19403diff -urNp linux-2.6.32.46/arch/x86/lib/copy_user_64.S linux-2.6.32.46/arch/x86/lib/copy_user_64.S
19404--- linux-2.6.32.46/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19405+++ linux-2.6.32.46/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19406@@ -15,13 +15,14 @@
19407 #include <asm/asm-offsets.h>
19408 #include <asm/thread_info.h>
19409 #include <asm/cpufeature.h>
19410+#include <asm/pgtable.h>
19411
19412 .macro ALTERNATIVE_JUMP feature,orig,alt
19413 0:
19414 .byte 0xe9 /* 32bit jump */
19415 .long \orig-1f /* by default jump to orig */
19416 1:
19417- .section .altinstr_replacement,"ax"
19418+ .section .altinstr_replacement,"a"
19419 2: .byte 0xe9 /* near jump with 32bit immediate */
19420 .long \alt-1b /* offset */ /* or alternatively to alt */
19421 .previous
19422@@ -64,49 +65,19 @@
19423 #endif
19424 .endm
19425
19426-/* Standard copy_to_user with segment limit checking */
19427-ENTRY(copy_to_user)
19428- CFI_STARTPROC
19429- GET_THREAD_INFO(%rax)
19430- movq %rdi,%rcx
19431- addq %rdx,%rcx
19432- jc bad_to_user
19433- cmpq TI_addr_limit(%rax),%rcx
19434- ja bad_to_user
19435- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19436- CFI_ENDPROC
19437-ENDPROC(copy_to_user)
19438-
19439-/* Standard copy_from_user with segment limit checking */
19440-ENTRY(copy_from_user)
19441- CFI_STARTPROC
19442- GET_THREAD_INFO(%rax)
19443- movq %rsi,%rcx
19444- addq %rdx,%rcx
19445- jc bad_from_user
19446- cmpq TI_addr_limit(%rax),%rcx
19447- ja bad_from_user
19448- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19449- CFI_ENDPROC
19450-ENDPROC(copy_from_user)
19451-
19452 ENTRY(copy_user_generic)
19453 CFI_STARTPROC
19454 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19455 CFI_ENDPROC
19456 ENDPROC(copy_user_generic)
19457
19458-ENTRY(__copy_from_user_inatomic)
19459- CFI_STARTPROC
19460- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19461- CFI_ENDPROC
19462-ENDPROC(__copy_from_user_inatomic)
19463-
19464 .section .fixup,"ax"
19465 /* must zero dest */
19466 ENTRY(bad_from_user)
19467 bad_from_user:
19468 CFI_STARTPROC
19469+ testl %edx,%edx
19470+ js bad_to_user
19471 movl %edx,%ecx
19472 xorl %eax,%eax
19473 rep
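For context on what the deleted copy_to_user/copy_from_user stubs were checking: "addq %rdx,%rcx; jc bad" rejects a pointer-plus-length that wraps, and "cmpq TI_addr_limit(%rax),%rcx; ja bad" rejects ranges ending past the task's address limit (with this patch those checks live in the C wrappers instead). The same test in plain C, with an invented limit constant standing in for TI_addr_limit:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define DEMO_ADDR_LIMIT 0x00007ffffffff000ULL	/* illustrative value only */

	static bool range_ok(uint64_t ptr, uint64_t len)
	{
		uint64_t end = ptr + len;

		if (end < ptr)			/* addq set CF: range wrapped */
			return false;
		return end <= DEMO_ADDR_LIMIT;	/* cmpq addr_limit; ja bad_*_user */
	}

	int main(void)
	{
		printf("%d\n", range_ok(0x1000, 64));			/* 1 */
		printf("%d\n", range_ok(UINT64_MAX - 8, 64));		/* 0: wraps */
		printf("%d\n", range_ok(DEMO_ADDR_LIMIT - 16, 64));	/* 0: past limit */
		return 0;
	}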
19474diff -urNp linux-2.6.32.46/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.46/arch/x86/lib/copy_user_nocache_64.S
19475--- linux-2.6.32.46/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19476+++ linux-2.6.32.46/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19477@@ -14,6 +14,7 @@
19478 #include <asm/current.h>
19479 #include <asm/asm-offsets.h>
19480 #include <asm/thread_info.h>
19481+#include <asm/pgtable.h>
19482
19483 .macro ALIGN_DESTINATION
19484 #ifdef FIX_ALIGNMENT
19485@@ -50,6 +51,15 @@
19486 */
19487 ENTRY(__copy_user_nocache)
19488 CFI_STARTPROC
19489+
19490+#ifdef CONFIG_PAX_MEMORY_UDEREF
19491+ mov $PAX_USER_SHADOW_BASE,%rcx
19492+ cmp %rcx,%rsi
19493+ jae 1f
19494+ add %rcx,%rsi
19495+1:
19496+#endif
19497+
19498 cmpl $8,%edx
19499 jb 20f /* less then 8 bytes, go to byte copy loop */
19500 ALIGN_DESTINATION
19501diff -urNp linux-2.6.32.46/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.46/arch/x86/lib/csum-wrappers_64.c
19502--- linux-2.6.32.46/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19503+++ linux-2.6.32.46/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19504@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19505 len -= 2;
19506 }
19507 }
19508+
19509+#ifdef CONFIG_PAX_MEMORY_UDEREF
19510+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19511+ src += PAX_USER_SHADOW_BASE;
19512+#endif
19513+
19514 isum = csum_partial_copy_generic((__force const void *)src,
19515 dst, len, isum, errp, NULL);
19516 if (unlikely(*errp))
19517@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19518 }
19519
19520 *errp = 0;
19521+
19522+#ifdef CONFIG_PAX_MEMORY_UDEREF
19523+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19524+ dst += PAX_USER_SHADOW_BASE;
19525+#endif
19526+
19527 return csum_partial_copy_generic(src, (void __force *)dst,
19528 len, isum, NULL, errp);
19529 }
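Both the nocache copy above and these checksum wrappers use the same amd64 UDEREF idiom: a pointer that still looks like a raw userland address (below PAX_USER_SHADOW_BASE) is shifted up into the kernel's shadow mapping of userland before it is dereferenced, while already-shifted pointers pass through unchanged. The arithmetic only, sketched in user space with an invented constant (the real base depends on the PaX configuration):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_USER_SHADOW_BASE 0xffff880000000000ULL	/* invented for the demo */

	static uint64_t rebase_user_ptr(uint64_t p)
	{
		if (p < DEMO_USER_SHADOW_BASE)	/* only raw userland pointers */
			p += DEMO_USER_SHADOW_BASE;
		return p;			/* shifted pointers pass through */
	}

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)rebase_user_ptr(0x400000));
		printf("%#llx\n", (unsigned long long)rebase_user_ptr(0xffff880000400000ULL));
		return 0;
	}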
19530diff -urNp linux-2.6.32.46/arch/x86/lib/getuser.S linux-2.6.32.46/arch/x86/lib/getuser.S
19531--- linux-2.6.32.46/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19532+++ linux-2.6.32.46/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19533@@ -33,14 +33,35 @@
19534 #include <asm/asm-offsets.h>
19535 #include <asm/thread_info.h>
19536 #include <asm/asm.h>
19537+#include <asm/segment.h>
19538+#include <asm/pgtable.h>
19539+
19540+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19541+#define __copyuser_seg gs;
19542+#else
19543+#define __copyuser_seg
19544+#endif
19545
19546 .text
19547 ENTRY(__get_user_1)
19548 CFI_STARTPROC
19549+
19550+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19551 GET_THREAD_INFO(%_ASM_DX)
19552 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19553 jae bad_get_user
19554-1: movzb (%_ASM_AX),%edx
19555+
19556+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19557+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19558+ cmp %_ASM_DX,%_ASM_AX
19559+ jae 1234f
19560+ add %_ASM_DX,%_ASM_AX
19561+1234:
19562+#endif
19563+
19564+#endif
19565+
19566+1: __copyuser_seg movzb (%_ASM_AX),%edx
19567 xor %eax,%eax
19568 ret
19569 CFI_ENDPROC
19570@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19571 ENTRY(__get_user_2)
19572 CFI_STARTPROC
19573 add $1,%_ASM_AX
19574+
19575+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19576 jc bad_get_user
19577 GET_THREAD_INFO(%_ASM_DX)
19578 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19579 jae bad_get_user
19580-2: movzwl -1(%_ASM_AX),%edx
19581+
19582+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19583+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19584+ cmp %_ASM_DX,%_ASM_AX
19585+ jae 1234f
19586+ add %_ASM_DX,%_ASM_AX
19587+1234:
19588+#endif
19589+
19590+#endif
19591+
19592+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19593 xor %eax,%eax
19594 ret
19595 CFI_ENDPROC
19596@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19597 ENTRY(__get_user_4)
19598 CFI_STARTPROC
19599 add $3,%_ASM_AX
19600+
19601+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19602 jc bad_get_user
19603 GET_THREAD_INFO(%_ASM_DX)
19604 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19605 jae bad_get_user
19606-3: mov -3(%_ASM_AX),%edx
19607+
19608+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19609+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19610+ cmp %_ASM_DX,%_ASM_AX
19611+ jae 1234f
19612+ add %_ASM_DX,%_ASM_AX
19613+1234:
19614+#endif
19615+
19616+#endif
19617+
19618+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19619 xor %eax,%eax
19620 ret
19621 CFI_ENDPROC
19622@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19623 GET_THREAD_INFO(%_ASM_DX)
19624 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19625 jae bad_get_user
19626+
19627+#ifdef CONFIG_PAX_MEMORY_UDEREF
19628+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19629+ cmp %_ASM_DX,%_ASM_AX
19630+ jae 1234f
19631+ add %_ASM_DX,%_ASM_AX
19632+1234:
19633+#endif
19634+
19635 4: movq -7(%_ASM_AX),%_ASM_DX
19636 xor %eax,%eax
19637 ret
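The reason the UDEREF checks above have to be repeated in four stubs is that get_user() selects __get_user_1/2/4/8 at compile time from sizeof(*ptr), so each access size has its own entry point. A rough user-space analogy of that size dispatch using C11 _Generic; the load_u* helpers are hypothetical, and unlike the real stubs this sketch has no fault handling or -EFAULT path:

	#include <stdint.h>
	#include <stdio.h>

	static int load_u8(const void *p, uint64_t *out)  { *out = *(const uint8_t *)p;  return 0; }
	static int load_u16(const void *p, uint64_t *out) { *out = *(const uint16_t *)p; return 0; }
	static int load_u32(const void *p, uint64_t *out) { *out = *(const uint32_t *)p; return 0; }
	static int load_u64(const void *p, uint64_t *out) { *out = *(const uint64_t *)p; return 0; }

	#define demo_get_user(x, ptr) _Generic(*(ptr),	\
		uint8_t:  load_u8,			\
		uint16_t: load_u16,			\
		uint32_t: load_u32,			\
		uint64_t: load_u64)((ptr), &(x))

	int main(void)
	{
		uint32_t src = 0xdeadbeef;
		uint64_t x = 0;

		demo_get_user(x, &src);			/* picks load_u32 */
		printf("%#llx\n", (unsigned long long)x);
		return 0;
	}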
19638diff -urNp linux-2.6.32.46/arch/x86/lib/memcpy_64.S linux-2.6.32.46/arch/x86/lib/memcpy_64.S
19639--- linux-2.6.32.46/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19640+++ linux-2.6.32.46/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19641@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19642 * It is also a lot simpler. Use this when possible:
19643 */
19644
19645- .section .altinstr_replacement, "ax"
19646+ .section .altinstr_replacement, "a"
19647 1: .byte 0xeb /* jmp <disp8> */
19648 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19649 2:
19650diff -urNp linux-2.6.32.46/arch/x86/lib/memset_64.S linux-2.6.32.46/arch/x86/lib/memset_64.S
19651--- linux-2.6.32.46/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19652+++ linux-2.6.32.46/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19653@@ -118,7 +118,7 @@ ENDPROC(__memset)
19654
19655 #include <asm/cpufeature.h>
19656
19657- .section .altinstr_replacement,"ax"
19658+ .section .altinstr_replacement,"a"
19659 1: .byte 0xeb /* jmp <disp8> */
19660 .byte (memset_c - memset) - (2f - 1b) /* offset */
19661 2:
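The "ax" to "a" change repeated across clear_page, copy_page, memcpy and memset marks .altinstr_replacement as allocatable but not executable: the replacement bytes are only a template that the alternatives machinery copies over the original instructions at boot, and they are never executed where they sit, which matters once KERNEXEC enforces kernel W^X. A user-space sketch of the same idea, assuming a stock Linux/x86 system that still permits an RWX anonymous mapping (a PaX MPROTECT kernel would refuse it); the instruction bytes encode "mov eax, 42; ret":

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	/* template lives in .rodata ("a"): never executed in place */
	static const unsigned char template_bytes[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

	int main(void)
	{
		void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		int (*fn)(void);

		if (buf == MAP_FAILED)
			return 1;
		memcpy(buf, template_bytes, sizeof(template_bytes));	/* "patch in" the bytes */
		fn = (int (*)(void))buf;		/* only this mapping needs exec */
		printf("%d\n", fn());			/* 42 */
		return 0;
	}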
19662diff -urNp linux-2.6.32.46/arch/x86/lib/mmx_32.c linux-2.6.32.46/arch/x86/lib/mmx_32.c
19663--- linux-2.6.32.46/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19664+++ linux-2.6.32.46/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19665@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19666 {
19667 void *p;
19668 int i;
19669+ unsigned long cr0;
19670
19671 if (unlikely(in_interrupt()))
19672 return __memcpy(to, from, len);
19673@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19674 kernel_fpu_begin();
19675
19676 __asm__ __volatile__ (
19677- "1: prefetch (%0)\n" /* This set is 28 bytes */
19678- " prefetch 64(%0)\n"
19679- " prefetch 128(%0)\n"
19680- " prefetch 192(%0)\n"
19681- " prefetch 256(%0)\n"
19682+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19683+ " prefetch 64(%1)\n"
19684+ " prefetch 128(%1)\n"
19685+ " prefetch 192(%1)\n"
19686+ " prefetch 256(%1)\n"
19687 "2: \n"
19688 ".section .fixup, \"ax\"\n"
19689- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19690+ "3: \n"
19691+
19692+#ifdef CONFIG_PAX_KERNEXEC
19693+ " movl %%cr0, %0\n"
19694+ " movl %0, %%eax\n"
19695+ " andl $0xFFFEFFFF, %%eax\n"
19696+ " movl %%eax, %%cr0\n"
19697+#endif
19698+
19699+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19700+
19701+#ifdef CONFIG_PAX_KERNEXEC
19702+ " movl %0, %%cr0\n"
19703+#endif
19704+
19705 " jmp 2b\n"
19706 ".previous\n"
19707 _ASM_EXTABLE(1b, 3b)
19708- : : "r" (from));
19709+ : "=&r" (cr0) : "r" (from) : "ax");
19710
19711 for ( ; i > 5; i--) {
19712 __asm__ __volatile__ (
19713- "1: prefetch 320(%0)\n"
19714- "2: movq (%0), %%mm0\n"
19715- " movq 8(%0), %%mm1\n"
19716- " movq 16(%0), %%mm2\n"
19717- " movq 24(%0), %%mm3\n"
19718- " movq %%mm0, (%1)\n"
19719- " movq %%mm1, 8(%1)\n"
19720- " movq %%mm2, 16(%1)\n"
19721- " movq %%mm3, 24(%1)\n"
19722- " movq 32(%0), %%mm0\n"
19723- " movq 40(%0), %%mm1\n"
19724- " movq 48(%0), %%mm2\n"
19725- " movq 56(%0), %%mm3\n"
19726- " movq %%mm0, 32(%1)\n"
19727- " movq %%mm1, 40(%1)\n"
19728- " movq %%mm2, 48(%1)\n"
19729- " movq %%mm3, 56(%1)\n"
19730+ "1: prefetch 320(%1)\n"
19731+ "2: movq (%1), %%mm0\n"
19732+ " movq 8(%1), %%mm1\n"
19733+ " movq 16(%1), %%mm2\n"
19734+ " movq 24(%1), %%mm3\n"
19735+ " movq %%mm0, (%2)\n"
19736+ " movq %%mm1, 8(%2)\n"
19737+ " movq %%mm2, 16(%2)\n"
19738+ " movq %%mm3, 24(%2)\n"
19739+ " movq 32(%1), %%mm0\n"
19740+ " movq 40(%1), %%mm1\n"
19741+ " movq 48(%1), %%mm2\n"
19742+ " movq 56(%1), %%mm3\n"
19743+ " movq %%mm0, 32(%2)\n"
19744+ " movq %%mm1, 40(%2)\n"
19745+ " movq %%mm2, 48(%2)\n"
19746+ " movq %%mm3, 56(%2)\n"
19747 ".section .fixup, \"ax\"\n"
19748- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19749+ "3:\n"
19750+
19751+#ifdef CONFIG_PAX_KERNEXEC
19752+ " movl %%cr0, %0\n"
19753+ " movl %0, %%eax\n"
19754+ " andl $0xFFFEFFFF, %%eax\n"
19755+ " movl %%eax, %%cr0\n"
19756+#endif
19757+
19758+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19759+
19760+#ifdef CONFIG_PAX_KERNEXEC
19761+ " movl %0, %%cr0\n"
19762+#endif
19763+
19764 " jmp 2b\n"
19765 ".previous\n"
19766 _ASM_EXTABLE(1b, 3b)
19767- : : "r" (from), "r" (to) : "memory");
19768+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19769
19770 from += 64;
19771 to += 64;
19772@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19773 static void fast_copy_page(void *to, void *from)
19774 {
19775 int i;
19776+ unsigned long cr0;
19777
19778 kernel_fpu_begin();
19779
19780@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19781 * but that is for later. -AV
19782 */
19783 __asm__ __volatile__(
19784- "1: prefetch (%0)\n"
19785- " prefetch 64(%0)\n"
19786- " prefetch 128(%0)\n"
19787- " prefetch 192(%0)\n"
19788- " prefetch 256(%0)\n"
19789+ "1: prefetch (%1)\n"
19790+ " prefetch 64(%1)\n"
19791+ " prefetch 128(%1)\n"
19792+ " prefetch 192(%1)\n"
19793+ " prefetch 256(%1)\n"
19794 "2: \n"
19795 ".section .fixup, \"ax\"\n"
19796- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19797+ "3: \n"
19798+
19799+#ifdef CONFIG_PAX_KERNEXEC
19800+ " movl %%cr0, %0\n"
19801+ " movl %0, %%eax\n"
19802+ " andl $0xFFFEFFFF, %%eax\n"
19803+ " movl %%eax, %%cr0\n"
19804+#endif
19805+
19806+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19807+
19808+#ifdef CONFIG_PAX_KERNEXEC
19809+ " movl %0, %%cr0\n"
19810+#endif
19811+
19812 " jmp 2b\n"
19813 ".previous\n"
19814- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19815+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19816
19817 for (i = 0; i < (4096-320)/64; i++) {
19818 __asm__ __volatile__ (
19819- "1: prefetch 320(%0)\n"
19820- "2: movq (%0), %%mm0\n"
19821- " movntq %%mm0, (%1)\n"
19822- " movq 8(%0), %%mm1\n"
19823- " movntq %%mm1, 8(%1)\n"
19824- " movq 16(%0), %%mm2\n"
19825- " movntq %%mm2, 16(%1)\n"
19826- " movq 24(%0), %%mm3\n"
19827- " movntq %%mm3, 24(%1)\n"
19828- " movq 32(%0), %%mm4\n"
19829- " movntq %%mm4, 32(%1)\n"
19830- " movq 40(%0), %%mm5\n"
19831- " movntq %%mm5, 40(%1)\n"
19832- " movq 48(%0), %%mm6\n"
19833- " movntq %%mm6, 48(%1)\n"
19834- " movq 56(%0), %%mm7\n"
19835- " movntq %%mm7, 56(%1)\n"
19836+ "1: prefetch 320(%1)\n"
19837+ "2: movq (%1), %%mm0\n"
19838+ " movntq %%mm0, (%2)\n"
19839+ " movq 8(%1), %%mm1\n"
19840+ " movntq %%mm1, 8(%2)\n"
19841+ " movq 16(%1), %%mm2\n"
19842+ " movntq %%mm2, 16(%2)\n"
19843+ " movq 24(%1), %%mm3\n"
19844+ " movntq %%mm3, 24(%2)\n"
19845+ " movq 32(%1), %%mm4\n"
19846+ " movntq %%mm4, 32(%2)\n"
19847+ " movq 40(%1), %%mm5\n"
19848+ " movntq %%mm5, 40(%2)\n"
19849+ " movq 48(%1), %%mm6\n"
19850+ " movntq %%mm6, 48(%2)\n"
19851+ " movq 56(%1), %%mm7\n"
19852+ " movntq %%mm7, 56(%2)\n"
19853 ".section .fixup, \"ax\"\n"
19854- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19855+ "3:\n"
19856+
19857+#ifdef CONFIG_PAX_KERNEXEC
19858+ " movl %%cr0, %0\n"
19859+ " movl %0, %%eax\n"
19860+ " andl $0xFFFEFFFF, %%eax\n"
19861+ " movl %%eax, %%cr0\n"
19862+#endif
19863+
19864+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19865+
19866+#ifdef CONFIG_PAX_KERNEXEC
19867+ " movl %0, %%cr0\n"
19868+#endif
19869+
19870 " jmp 2b\n"
19871 ".previous\n"
19872- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19873+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19874
19875 from += 64;
19876 to += 64;
19877@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19878 static void fast_copy_page(void *to, void *from)
19879 {
19880 int i;
19881+ unsigned long cr0;
19882
19883 kernel_fpu_begin();
19884
19885 __asm__ __volatile__ (
19886- "1: prefetch (%0)\n"
19887- " prefetch 64(%0)\n"
19888- " prefetch 128(%0)\n"
19889- " prefetch 192(%0)\n"
19890- " prefetch 256(%0)\n"
19891+ "1: prefetch (%1)\n"
19892+ " prefetch 64(%1)\n"
19893+ " prefetch 128(%1)\n"
19894+ " prefetch 192(%1)\n"
19895+ " prefetch 256(%1)\n"
19896 "2: \n"
19897 ".section .fixup, \"ax\"\n"
19898- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19899+ "3: \n"
19900+
19901+#ifdef CONFIG_PAX_KERNEXEC
19902+ " movl %%cr0, %0\n"
19903+ " movl %0, %%eax\n"
19904+ " andl $0xFFFEFFFF, %%eax\n"
19905+ " movl %%eax, %%cr0\n"
19906+#endif
19907+
19908+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19909+
19910+#ifdef CONFIG_PAX_KERNEXEC
19911+ " movl %0, %%cr0\n"
19912+#endif
19913+
19914 " jmp 2b\n"
19915 ".previous\n"
19916- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19917+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19918
19919 for (i = 0; i < 4096/64; i++) {
19920 __asm__ __volatile__ (
19921- "1: prefetch 320(%0)\n"
19922- "2: movq (%0), %%mm0\n"
19923- " movq 8(%0), %%mm1\n"
19924- " movq 16(%0), %%mm2\n"
19925- " movq 24(%0), %%mm3\n"
19926- " movq %%mm0, (%1)\n"
19927- " movq %%mm1, 8(%1)\n"
19928- " movq %%mm2, 16(%1)\n"
19929- " movq %%mm3, 24(%1)\n"
19930- " movq 32(%0), %%mm0\n"
19931- " movq 40(%0), %%mm1\n"
19932- " movq 48(%0), %%mm2\n"
19933- " movq 56(%0), %%mm3\n"
19934- " movq %%mm0, 32(%1)\n"
19935- " movq %%mm1, 40(%1)\n"
19936- " movq %%mm2, 48(%1)\n"
19937- " movq %%mm3, 56(%1)\n"
19938+ "1: prefetch 320(%1)\n"
19939+ "2: movq (%1), %%mm0\n"
19940+ " movq 8(%1), %%mm1\n"
19941+ " movq 16(%1), %%mm2\n"
19942+ " movq 24(%1), %%mm3\n"
19943+ " movq %%mm0, (%2)\n"
19944+ " movq %%mm1, 8(%2)\n"
19945+ " movq %%mm2, 16(%2)\n"
19946+ " movq %%mm3, 24(%2)\n"
19947+ " movq 32(%1), %%mm0\n"
19948+ " movq 40(%1), %%mm1\n"
19949+ " movq 48(%1), %%mm2\n"
19950+ " movq 56(%1), %%mm3\n"
19951+ " movq %%mm0, 32(%2)\n"
19952+ " movq %%mm1, 40(%2)\n"
19953+ " movq %%mm2, 48(%2)\n"
19954+ " movq %%mm3, 56(%2)\n"
19955 ".section .fixup, \"ax\"\n"
19956- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19957+ "3:\n"
19958+
19959+#ifdef CONFIG_PAX_KERNEXEC
19960+ " movl %%cr0, %0\n"
19961+ " movl %0, %%eax\n"
19962+ " andl $0xFFFEFFFF, %%eax\n"
19963+ " movl %%eax, %%cr0\n"
19964+#endif
19965+
19966+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19967+
19968+#ifdef CONFIG_PAX_KERNEXEC
19969+ " movl %0, %%cr0\n"
19970+#endif
19971+
19972 " jmp 2b\n"
19973 ".previous\n"
19974 _ASM_EXTABLE(1b, 3b)
19975- : : "r" (from), "r" (to) : "memory");
19976+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19977
19978 from += 64;
19979 to += 64;
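The KERNEXEC blocks added throughout this file save %cr0, clear its WP bit so the fault fixup can rewrite the read-only prefetch instruction in kernel text (the movw writes a short jmp over it), and then restore the saved value. The mask 0xFFFEFFFF is simply "all bits except bit 16 (CR0.WP)"; a quick arithmetic check in user space, with an invented CR0 value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t wp    = UINT32_C(1) << 16;	/* CR0.WP */
		uint32_t cr0   = 0x8005003bu;		/* invented CR0 value */
		uint32_t saved = cr0;

		printf("%#010x\n", ~wp);		/* 0xfffeffff, the andl mask */
		cr0 &= ~wp;				/* "andl $0xFFFEFFFF, %%eax" */
		printf("%#010x\n", cr0);		/* WP now clear */
		cr0 = saved;				/* "movl %0, %%cr0" restores it */
		printf("%#010x\n", cr0);
		return 0;
	}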
19980diff -urNp linux-2.6.32.46/arch/x86/lib/putuser.S linux-2.6.32.46/arch/x86/lib/putuser.S
19981--- linux-2.6.32.46/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19982+++ linux-2.6.32.46/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19983@@ -15,7 +15,8 @@
19984 #include <asm/thread_info.h>
19985 #include <asm/errno.h>
19986 #include <asm/asm.h>
19987-
19988+#include <asm/segment.h>
19989+#include <asm/pgtable.h>
19990
19991 /*
19992 * __put_user_X
19993@@ -29,52 +30,119 @@
19994 * as they get called from within inline assembly.
19995 */
19996
19997-#define ENTER CFI_STARTPROC ; \
19998- GET_THREAD_INFO(%_ASM_BX)
19999+#define ENTER CFI_STARTPROC
20000 #define EXIT ret ; \
20001 CFI_ENDPROC
20002
20003+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20004+#define _DEST %_ASM_CX,%_ASM_BX
20005+#else
20006+#define _DEST %_ASM_CX
20007+#endif
20008+
20009+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20010+#define __copyuser_seg gs;
20011+#else
20012+#define __copyuser_seg
20013+#endif
20014+
20015 .text
20016 ENTRY(__put_user_1)
20017 ENTER
20018+
20019+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20020+ GET_THREAD_INFO(%_ASM_BX)
20021 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20022 jae bad_put_user
20023-1: movb %al,(%_ASM_CX)
20024+
20025+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20026+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20027+ cmp %_ASM_BX,%_ASM_CX
20028+ jb 1234f
20029+ xor %ebx,%ebx
20030+1234:
20031+#endif
20032+
20033+#endif
20034+
20035+1: __copyuser_seg movb %al,(_DEST)
20036 xor %eax,%eax
20037 EXIT
20038 ENDPROC(__put_user_1)
20039
20040 ENTRY(__put_user_2)
20041 ENTER
20042+
20043+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20044+ GET_THREAD_INFO(%_ASM_BX)
20045 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20046 sub $1,%_ASM_BX
20047 cmp %_ASM_BX,%_ASM_CX
20048 jae bad_put_user
20049-2: movw %ax,(%_ASM_CX)
20050+
20051+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20052+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20053+ cmp %_ASM_BX,%_ASM_CX
20054+ jb 1234f
20055+ xor %ebx,%ebx
20056+1234:
20057+#endif
20058+
20059+#endif
20060+
20061+2: __copyuser_seg movw %ax,(_DEST)
20062 xor %eax,%eax
20063 EXIT
20064 ENDPROC(__put_user_2)
20065
20066 ENTRY(__put_user_4)
20067 ENTER
20068+
20069+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20070+ GET_THREAD_INFO(%_ASM_BX)
20071 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20072 sub $3,%_ASM_BX
20073 cmp %_ASM_BX,%_ASM_CX
20074 jae bad_put_user
20075-3: movl %eax,(%_ASM_CX)
20076+
20077+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20078+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20079+ cmp %_ASM_BX,%_ASM_CX
20080+ jb 1234f
20081+ xor %ebx,%ebx
20082+1234:
20083+#endif
20084+
20085+#endif
20086+
20087+3: __copyuser_seg movl %eax,(_DEST)
20088 xor %eax,%eax
20089 EXIT
20090 ENDPROC(__put_user_4)
20091
20092 ENTRY(__put_user_8)
20093 ENTER
20094+
20095+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20096+ GET_THREAD_INFO(%_ASM_BX)
20097 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20098 sub $7,%_ASM_BX
20099 cmp %_ASM_BX,%_ASM_CX
20100 jae bad_put_user
20101-4: mov %_ASM_AX,(%_ASM_CX)
20102+
20103+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20104+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20105+ cmp %_ASM_BX,%_ASM_CX
20106+ jb 1234f
20107+ xor %ebx,%ebx
20108+1234:
20109+#endif
20110+
20111+#endif
20112+
20113+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20114 #ifdef CONFIG_X86_32
20115-5: movl %edx,4(%_ASM_CX)
20116+5: __copyuser_seg movl %edx,4(_DEST)
20117 #endif
20118 xor %eax,%eax
20119 EXIT
20120diff -urNp linux-2.6.32.46/arch/x86/lib/usercopy_32.c linux-2.6.32.46/arch/x86/lib/usercopy_32.c
20121--- linux-2.6.32.46/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
20122+++ linux-2.6.32.46/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
20123@@ -43,7 +43,7 @@ do { \
20124 __asm__ __volatile__( \
20125 " testl %1,%1\n" \
20126 " jz 2f\n" \
20127- "0: lodsb\n" \
20128+ "0: "__copyuser_seg"lodsb\n" \
20129 " stosb\n" \
20130 " testb %%al,%%al\n" \
20131 " jz 1f\n" \
20132@@ -128,10 +128,12 @@ do { \
20133 int __d0; \
20134 might_fault(); \
20135 __asm__ __volatile__( \
20136+ __COPYUSER_SET_ES \
20137 "0: rep; stosl\n" \
20138 " movl %2,%0\n" \
20139 "1: rep; stosb\n" \
20140 "2:\n" \
20141+ __COPYUSER_RESTORE_ES \
20142 ".section .fixup,\"ax\"\n" \
20143 "3: lea 0(%2,%0,4),%0\n" \
20144 " jmp 2b\n" \
20145@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20146 might_fault();
20147
20148 __asm__ __volatile__(
20149+ __COPYUSER_SET_ES
20150 " testl %0, %0\n"
20151 " jz 3f\n"
20152 " andl %0,%%ecx\n"
20153@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20154 " subl %%ecx,%0\n"
20155 " addl %0,%%eax\n"
20156 "1:\n"
20157+ __COPYUSER_RESTORE_ES
20158 ".section .fixup,\"ax\"\n"
20159 "2: xorl %%eax,%%eax\n"
20160 " jmp 1b\n"
20161@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20162
20163 #ifdef CONFIG_X86_INTEL_USERCOPY
20164 static unsigned long
20165-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20166+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20167 {
20168 int d0, d1;
20169 __asm__ __volatile__(
20170@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20171 " .align 2,0x90\n"
20172 "3: movl 0(%4), %%eax\n"
20173 "4: movl 4(%4), %%edx\n"
20174- "5: movl %%eax, 0(%3)\n"
20175- "6: movl %%edx, 4(%3)\n"
20176+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20177+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20178 "7: movl 8(%4), %%eax\n"
20179 "8: movl 12(%4),%%edx\n"
20180- "9: movl %%eax, 8(%3)\n"
20181- "10: movl %%edx, 12(%3)\n"
20182+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20183+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20184 "11: movl 16(%4), %%eax\n"
20185 "12: movl 20(%4), %%edx\n"
20186- "13: movl %%eax, 16(%3)\n"
20187- "14: movl %%edx, 20(%3)\n"
20188+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20189+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20190 "15: movl 24(%4), %%eax\n"
20191 "16: movl 28(%4), %%edx\n"
20192- "17: movl %%eax, 24(%3)\n"
20193- "18: movl %%edx, 28(%3)\n"
20194+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20195+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20196 "19: movl 32(%4), %%eax\n"
20197 "20: movl 36(%4), %%edx\n"
20198- "21: movl %%eax, 32(%3)\n"
20199- "22: movl %%edx, 36(%3)\n"
20200+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20201+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20202 "23: movl 40(%4), %%eax\n"
20203 "24: movl 44(%4), %%edx\n"
20204- "25: movl %%eax, 40(%3)\n"
20205- "26: movl %%edx, 44(%3)\n"
20206+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20207+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20208 "27: movl 48(%4), %%eax\n"
20209 "28: movl 52(%4), %%edx\n"
20210- "29: movl %%eax, 48(%3)\n"
20211- "30: movl %%edx, 52(%3)\n"
20212+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20213+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20214 "31: movl 56(%4), %%eax\n"
20215 "32: movl 60(%4), %%edx\n"
20216- "33: movl %%eax, 56(%3)\n"
20217- "34: movl %%edx, 60(%3)\n"
20218+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20219+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20220 " addl $-64, %0\n"
20221 " addl $64, %4\n"
20222 " addl $64, %3\n"
20223@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20224 " shrl $2, %0\n"
20225 " andl $3, %%eax\n"
20226 " cld\n"
20227+ __COPYUSER_SET_ES
20228 "99: rep; movsl\n"
20229 "36: movl %%eax, %0\n"
20230 "37: rep; movsb\n"
20231 "100:\n"
20232+ __COPYUSER_RESTORE_ES
20233+ ".section .fixup,\"ax\"\n"
20234+ "101: lea 0(%%eax,%0,4),%0\n"
20235+ " jmp 100b\n"
20236+ ".previous\n"
20237+ ".section __ex_table,\"a\"\n"
20238+ " .align 4\n"
20239+ " .long 1b,100b\n"
20240+ " .long 2b,100b\n"
20241+ " .long 3b,100b\n"
20242+ " .long 4b,100b\n"
20243+ " .long 5b,100b\n"
20244+ " .long 6b,100b\n"
20245+ " .long 7b,100b\n"
20246+ " .long 8b,100b\n"
20247+ " .long 9b,100b\n"
20248+ " .long 10b,100b\n"
20249+ " .long 11b,100b\n"
20250+ " .long 12b,100b\n"
20251+ " .long 13b,100b\n"
20252+ " .long 14b,100b\n"
20253+ " .long 15b,100b\n"
20254+ " .long 16b,100b\n"
20255+ " .long 17b,100b\n"
20256+ " .long 18b,100b\n"
20257+ " .long 19b,100b\n"
20258+ " .long 20b,100b\n"
20259+ " .long 21b,100b\n"
20260+ " .long 22b,100b\n"
20261+ " .long 23b,100b\n"
20262+ " .long 24b,100b\n"
20263+ " .long 25b,100b\n"
20264+ " .long 26b,100b\n"
20265+ " .long 27b,100b\n"
20266+ " .long 28b,100b\n"
20267+ " .long 29b,100b\n"
20268+ " .long 30b,100b\n"
20269+ " .long 31b,100b\n"
20270+ " .long 32b,100b\n"
20271+ " .long 33b,100b\n"
20272+ " .long 34b,100b\n"
20273+ " .long 35b,100b\n"
20274+ " .long 36b,100b\n"
20275+ " .long 37b,100b\n"
20276+ " .long 99b,101b\n"
20277+ ".previous"
20278+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20279+ : "1"(to), "2"(from), "0"(size)
20280+ : "eax", "edx", "memory");
20281+ return size;
20282+}
20283+
20284+static unsigned long
20285+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20286+{
20287+ int d0, d1;
20288+ __asm__ __volatile__(
20289+ " .align 2,0x90\n"
20290+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20291+ " cmpl $67, %0\n"
20292+ " jbe 3f\n"
20293+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20294+ " .align 2,0x90\n"
20295+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20296+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20297+ "5: movl %%eax, 0(%3)\n"
20298+ "6: movl %%edx, 4(%3)\n"
20299+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20300+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20301+ "9: movl %%eax, 8(%3)\n"
20302+ "10: movl %%edx, 12(%3)\n"
20303+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20304+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20305+ "13: movl %%eax, 16(%3)\n"
20306+ "14: movl %%edx, 20(%3)\n"
20307+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20308+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20309+ "17: movl %%eax, 24(%3)\n"
20310+ "18: movl %%edx, 28(%3)\n"
20311+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20312+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20313+ "21: movl %%eax, 32(%3)\n"
20314+ "22: movl %%edx, 36(%3)\n"
20315+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20316+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20317+ "25: movl %%eax, 40(%3)\n"
20318+ "26: movl %%edx, 44(%3)\n"
20319+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20320+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20321+ "29: movl %%eax, 48(%3)\n"
20322+ "30: movl %%edx, 52(%3)\n"
20323+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20324+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20325+ "33: movl %%eax, 56(%3)\n"
20326+ "34: movl %%edx, 60(%3)\n"
20327+ " addl $-64, %0\n"
20328+ " addl $64, %4\n"
20329+ " addl $64, %3\n"
20330+ " cmpl $63, %0\n"
20331+ " ja 1b\n"
20332+ "35: movl %0, %%eax\n"
20333+ " shrl $2, %0\n"
20334+ " andl $3, %%eax\n"
20335+ " cld\n"
20336+ "99: rep; "__copyuser_seg" movsl\n"
20337+ "36: movl %%eax, %0\n"
20338+ "37: rep; "__copyuser_seg" movsb\n"
20339+ "100:\n"
20340 ".section .fixup,\"ax\"\n"
20341 "101: lea 0(%%eax,%0,4),%0\n"
20342 " jmp 100b\n"
20343@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20344 int d0, d1;
20345 __asm__ __volatile__(
20346 " .align 2,0x90\n"
20347- "0: movl 32(%4), %%eax\n"
20348+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20349 " cmpl $67, %0\n"
20350 " jbe 2f\n"
20351- "1: movl 64(%4), %%eax\n"
20352+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20353 " .align 2,0x90\n"
20354- "2: movl 0(%4), %%eax\n"
20355- "21: movl 4(%4), %%edx\n"
20356+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20357+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20358 " movl %%eax, 0(%3)\n"
20359 " movl %%edx, 4(%3)\n"
20360- "3: movl 8(%4), %%eax\n"
20361- "31: movl 12(%4),%%edx\n"
20362+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20363+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20364 " movl %%eax, 8(%3)\n"
20365 " movl %%edx, 12(%3)\n"
20366- "4: movl 16(%4), %%eax\n"
20367- "41: movl 20(%4), %%edx\n"
20368+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20369+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20370 " movl %%eax, 16(%3)\n"
20371 " movl %%edx, 20(%3)\n"
20372- "10: movl 24(%4), %%eax\n"
20373- "51: movl 28(%4), %%edx\n"
20374+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20375+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20376 " movl %%eax, 24(%3)\n"
20377 " movl %%edx, 28(%3)\n"
20378- "11: movl 32(%4), %%eax\n"
20379- "61: movl 36(%4), %%edx\n"
20380+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20381+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20382 " movl %%eax, 32(%3)\n"
20383 " movl %%edx, 36(%3)\n"
20384- "12: movl 40(%4), %%eax\n"
20385- "71: movl 44(%4), %%edx\n"
20386+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20387+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20388 " movl %%eax, 40(%3)\n"
20389 " movl %%edx, 44(%3)\n"
20390- "13: movl 48(%4), %%eax\n"
20391- "81: movl 52(%4), %%edx\n"
20392+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20393+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20394 " movl %%eax, 48(%3)\n"
20395 " movl %%edx, 52(%3)\n"
20396- "14: movl 56(%4), %%eax\n"
20397- "91: movl 60(%4), %%edx\n"
20398+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20399+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20400 " movl %%eax, 56(%3)\n"
20401 " movl %%edx, 60(%3)\n"
20402 " addl $-64, %0\n"
20403@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20404 " shrl $2, %0\n"
20405 " andl $3, %%eax\n"
20406 " cld\n"
20407- "6: rep; movsl\n"
20408+ "6: rep; "__copyuser_seg" movsl\n"
20409 " movl %%eax,%0\n"
20410- "7: rep; movsb\n"
20411+ "7: rep; "__copyuser_seg" movsb\n"
20412 "8:\n"
20413 ".section .fixup,\"ax\"\n"
20414 "9: lea 0(%%eax,%0,4),%0\n"
20415@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20416
20417 __asm__ __volatile__(
20418 " .align 2,0x90\n"
20419- "0: movl 32(%4), %%eax\n"
20420+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20421 " cmpl $67, %0\n"
20422 " jbe 2f\n"
20423- "1: movl 64(%4), %%eax\n"
20424+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20425 " .align 2,0x90\n"
20426- "2: movl 0(%4), %%eax\n"
20427- "21: movl 4(%4), %%edx\n"
20428+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20429+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20430 " movnti %%eax, 0(%3)\n"
20431 " movnti %%edx, 4(%3)\n"
20432- "3: movl 8(%4), %%eax\n"
20433- "31: movl 12(%4),%%edx\n"
20434+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20435+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20436 " movnti %%eax, 8(%3)\n"
20437 " movnti %%edx, 12(%3)\n"
20438- "4: movl 16(%4), %%eax\n"
20439- "41: movl 20(%4), %%edx\n"
20440+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20441+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20442 " movnti %%eax, 16(%3)\n"
20443 " movnti %%edx, 20(%3)\n"
20444- "10: movl 24(%4), %%eax\n"
20445- "51: movl 28(%4), %%edx\n"
20446+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20447+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20448 " movnti %%eax, 24(%3)\n"
20449 " movnti %%edx, 28(%3)\n"
20450- "11: movl 32(%4), %%eax\n"
20451- "61: movl 36(%4), %%edx\n"
20452+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20453+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20454 " movnti %%eax, 32(%3)\n"
20455 " movnti %%edx, 36(%3)\n"
20456- "12: movl 40(%4), %%eax\n"
20457- "71: movl 44(%4), %%edx\n"
20458+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20459+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20460 " movnti %%eax, 40(%3)\n"
20461 " movnti %%edx, 44(%3)\n"
20462- "13: movl 48(%4), %%eax\n"
20463- "81: movl 52(%4), %%edx\n"
20464+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20465+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20466 " movnti %%eax, 48(%3)\n"
20467 " movnti %%edx, 52(%3)\n"
20468- "14: movl 56(%4), %%eax\n"
20469- "91: movl 60(%4), %%edx\n"
20470+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20471+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20472 " movnti %%eax, 56(%3)\n"
20473 " movnti %%edx, 60(%3)\n"
20474 " addl $-64, %0\n"
20475@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20476 " shrl $2, %0\n"
20477 " andl $3, %%eax\n"
20478 " cld\n"
20479- "6: rep; movsl\n"
20480+ "6: rep; "__copyuser_seg" movsl\n"
20481 " movl %%eax,%0\n"
20482- "7: rep; movsb\n"
20483+ "7: rep; "__copyuser_seg" movsb\n"
20484 "8:\n"
20485 ".section .fixup,\"ax\"\n"
20486 "9: lea 0(%%eax,%0,4),%0\n"
20487@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20488
20489 __asm__ __volatile__(
20490 " .align 2,0x90\n"
20491- "0: movl 32(%4), %%eax\n"
20492+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20493 " cmpl $67, %0\n"
20494 " jbe 2f\n"
20495- "1: movl 64(%4), %%eax\n"
20496+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20497 " .align 2,0x90\n"
20498- "2: movl 0(%4), %%eax\n"
20499- "21: movl 4(%4), %%edx\n"
20500+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20501+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20502 " movnti %%eax, 0(%3)\n"
20503 " movnti %%edx, 4(%3)\n"
20504- "3: movl 8(%4), %%eax\n"
20505- "31: movl 12(%4),%%edx\n"
20506+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20507+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20508 " movnti %%eax, 8(%3)\n"
20509 " movnti %%edx, 12(%3)\n"
20510- "4: movl 16(%4), %%eax\n"
20511- "41: movl 20(%4), %%edx\n"
20512+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20513+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20514 " movnti %%eax, 16(%3)\n"
20515 " movnti %%edx, 20(%3)\n"
20516- "10: movl 24(%4), %%eax\n"
20517- "51: movl 28(%4), %%edx\n"
20518+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20519+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20520 " movnti %%eax, 24(%3)\n"
20521 " movnti %%edx, 28(%3)\n"
20522- "11: movl 32(%4), %%eax\n"
20523- "61: movl 36(%4), %%edx\n"
20524+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20525+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20526 " movnti %%eax, 32(%3)\n"
20527 " movnti %%edx, 36(%3)\n"
20528- "12: movl 40(%4), %%eax\n"
20529- "71: movl 44(%4), %%edx\n"
20530+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20531+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20532 " movnti %%eax, 40(%3)\n"
20533 " movnti %%edx, 44(%3)\n"
20534- "13: movl 48(%4), %%eax\n"
20535- "81: movl 52(%4), %%edx\n"
20536+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20537+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20538 " movnti %%eax, 48(%3)\n"
20539 " movnti %%edx, 52(%3)\n"
20540- "14: movl 56(%4), %%eax\n"
20541- "91: movl 60(%4), %%edx\n"
20542+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20543+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20544 " movnti %%eax, 56(%3)\n"
20545 " movnti %%edx, 60(%3)\n"
20546 " addl $-64, %0\n"
20547@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20548 " shrl $2, %0\n"
20549 " andl $3, %%eax\n"
20550 " cld\n"
20551- "6: rep; movsl\n"
20552+ "6: rep; "__copyuser_seg" movsl\n"
20553 " movl %%eax,%0\n"
20554- "7: rep; movsb\n"
20555+ "7: rep; "__copyuser_seg" movsb\n"
20556 "8:\n"
20557 ".section .fixup,\"ax\"\n"
20558 "9: lea 0(%%eax,%0,4),%0\n"
20559@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20560 */
20561 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20562 unsigned long size);
20563-unsigned long __copy_user_intel(void __user *to, const void *from,
20564+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20565+ unsigned long size);
20566+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20567 unsigned long size);
20568 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20569 const void __user *from, unsigned long size);
20570 #endif /* CONFIG_X86_INTEL_USERCOPY */
20571
20572 /* Generic arbitrary sized copy. */
20573-#define __copy_user(to, from, size) \
20574+#define __copy_user(to, from, size, prefix, set, restore) \
20575 do { \
20576 int __d0, __d1, __d2; \
20577 __asm__ __volatile__( \
20578+ set \
20579 " cmp $7,%0\n" \
20580 " jbe 1f\n" \
20581 " movl %1,%0\n" \
20582 " negl %0\n" \
20583 " andl $7,%0\n" \
20584 " subl %0,%3\n" \
20585- "4: rep; movsb\n" \
20586+ "4: rep; "prefix"movsb\n" \
20587 " movl %3,%0\n" \
20588 " shrl $2,%0\n" \
20589 " andl $3,%3\n" \
20590 " .align 2,0x90\n" \
20591- "0: rep; movsl\n" \
20592+ "0: rep; "prefix"movsl\n" \
20593 " movl %3,%0\n" \
20594- "1: rep; movsb\n" \
20595+ "1: rep; "prefix"movsb\n" \
20596 "2:\n" \
20597+ restore \
20598 ".section .fixup,\"ax\"\n" \
20599 "5: addl %3,%0\n" \
20600 " jmp 2b\n" \
20601@@ -682,14 +799,14 @@ do { \
20602 " negl %0\n" \
20603 " andl $7,%0\n" \
20604 " subl %0,%3\n" \
20605- "4: rep; movsb\n" \
20606+ "4: rep; "__copyuser_seg"movsb\n" \
20607 " movl %3,%0\n" \
20608 " shrl $2,%0\n" \
20609 " andl $3,%3\n" \
20610 " .align 2,0x90\n" \
20611- "0: rep; movsl\n" \
20612+ "0: rep; "__copyuser_seg"movsl\n" \
20613 " movl %3,%0\n" \
20614- "1: rep; movsb\n" \
20615+ "1: rep; "__copyuser_seg"movsb\n" \
20616 "2:\n" \
20617 ".section .fixup,\"ax\"\n" \
20618 "5: addl %3,%0\n" \
20619@@ -775,9 +892,9 @@ survive:
20620 }
20621 #endif
20622 if (movsl_is_ok(to, from, n))
20623- __copy_user(to, from, n);
20624+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20625 else
20626- n = __copy_user_intel(to, from, n);
20627+ n = __generic_copy_to_user_intel(to, from, n);
20628 return n;
20629 }
20630 EXPORT_SYMBOL(__copy_to_user_ll);
20631@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20632 unsigned long n)
20633 {
20634 if (movsl_is_ok(to, from, n))
20635- __copy_user(to, from, n);
20636+ __copy_user(to, from, n, __copyuser_seg, "", "");
20637 else
20638- n = __copy_user_intel((void __user *)to,
20639- (const void *)from, n);
20640+ n = __generic_copy_from_user_intel(to, from, n);
20641 return n;
20642 }
20643 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20644@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20645 if (n > 64 && cpu_has_xmm2)
20646 n = __copy_user_intel_nocache(to, from, n);
20647 else
20648- __copy_user(to, from, n);
20649+ __copy_user(to, from, n, __copyuser_seg, "", "");
20650 #else
20651- __copy_user(to, from, n);
20652+ __copy_user(to, from, n, __copyuser_seg, "", "");
20653 #endif
20654 return n;
20655 }
20656 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20657
20658-/**
20659- * copy_to_user: - Copy a block of data into user space.
20660- * @to: Destination address, in user space.
20661- * @from: Source address, in kernel space.
20662- * @n: Number of bytes to copy.
20663- *
20664- * Context: User context only. This function may sleep.
20665- *
20666- * Copy data from kernel space to user space.
20667- *
20668- * Returns number of bytes that could not be copied.
20669- * On success, this will be zero.
20670- */
20671-unsigned long
20672-copy_to_user(void __user *to, const void *from, unsigned long n)
20673+#ifdef CONFIG_PAX_MEMORY_UDEREF
20674+void __set_fs(mm_segment_t x)
20675 {
20676- if (access_ok(VERIFY_WRITE, to, n))
20677- n = __copy_to_user(to, from, n);
20678- return n;
20679+ switch (x.seg) {
20680+ case 0:
20681+ loadsegment(gs, 0);
20682+ break;
20683+ case TASK_SIZE_MAX:
20684+ loadsegment(gs, __USER_DS);
20685+ break;
20686+ case -1UL:
20687+ loadsegment(gs, __KERNEL_DS);
20688+ break;
20689+ default:
20690+ BUG();
20691+ }
20692+ return;
20693 }
20694-EXPORT_SYMBOL(copy_to_user);
20695+EXPORT_SYMBOL(__set_fs);
20696
20697-/**
20698- * copy_from_user: - Copy a block of data from user space.
20699- * @to: Destination address, in kernel space.
20700- * @from: Source address, in user space.
20701- * @n: Number of bytes to copy.
20702- *
20703- * Context: User context only. This function may sleep.
20704- *
20705- * Copy data from user space to kernel space.
20706- *
20707- * Returns number of bytes that could not be copied.
20708- * On success, this will be zero.
20709- *
20710- * If some data could not be copied, this function will pad the copied
20711- * data to the requested size using zero bytes.
20712- */
20713-unsigned long
20714-copy_from_user(void *to, const void __user *from, unsigned long n)
20715+void set_fs(mm_segment_t x)
20716 {
20717- if (access_ok(VERIFY_READ, from, n))
20718- n = __copy_from_user(to, from, n);
20719- else
20720- memset(to, 0, n);
20721- return n;
20722+ current_thread_info()->addr_limit = x;
20723+ __set_fs(x);
20724 }
20725-EXPORT_SYMBOL(copy_from_user);
20726+EXPORT_SYMBOL(set_fs);
20727+#endif
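Segment games aside, the generic __copy_user() body parametrized above is the usual "copy words, then the tail bytes" loop: shrl $2 produces the word count for rep movsl, and andl $3 leaves the 0-3 byte remainder for rep movsb. A simplified user-space equivalent, with the initial destination-alignment step and the fault fixups left out:

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	static void copy_words_then_tail(void *dst, const void *src, size_t n)
	{
		size_t words = n >> 2;			/* "shrl $2,%0": movsl count */
		size_t tail  = n & 3;			/* "andl $3,%3": movsb count */
		unsigned char *d = dst;
		const unsigned char *s = src;

		for (size_t i = 0; i < words; i++, d += 4, s += 4)
			memcpy(d, s, 4);		/* one 32-bit "movsl" */
		memcpy(d, s, tail);			/* trailing byte copy */
	}

	int main(void)
	{
		char src[] = "segment games aside";
		char dst[sizeof(src)] = { 0 };

		copy_words_then_tail(dst, src, sizeof(src));
		printf("%s\n", dst);
		return 0;
	}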
20728diff -urNp linux-2.6.32.46/arch/x86/lib/usercopy_64.c linux-2.6.32.46/arch/x86/lib/usercopy_64.c
20729--- linux-2.6.32.46/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20730+++ linux-2.6.32.46/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20731@@ -42,6 +42,12 @@ long
20732 __strncpy_from_user(char *dst, const char __user *src, long count)
20733 {
20734 long res;
20735+
20736+#ifdef CONFIG_PAX_MEMORY_UDEREF
20737+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20738+ src += PAX_USER_SHADOW_BASE;
20739+#endif
20740+
20741 __do_strncpy_from_user(dst, src, count, res);
20742 return res;
20743 }
20744@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20745 {
20746 long __d0;
20747 might_fault();
20748+
20749+#ifdef CONFIG_PAX_MEMORY_UDEREF
20750+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20751+ addr += PAX_USER_SHADOW_BASE;
20752+#endif
20753+
20754 /* no memory constraint because it doesn't change any memory gcc knows
20755 about */
20756 asm volatile(
20757@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20758
20759 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20760 {
20761- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20762+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20763+
20764+#ifdef CONFIG_PAX_MEMORY_UDEREF
20765+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20766+ to += PAX_USER_SHADOW_BASE;
20767+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20768+ from += PAX_USER_SHADOW_BASE;
20769+#endif
20770+
20771 return copy_user_generic((__force void *)to, (__force void *)from, len);
20772- }
20773- return len;
20774+ }
20775+ return len;
20776 }
20777 EXPORT_SYMBOL(copy_in_user);
20778
20779diff -urNp linux-2.6.32.46/arch/x86/Makefile linux-2.6.32.46/arch/x86/Makefile
20780--- linux-2.6.32.46/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20781+++ linux-2.6.32.46/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20782@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20783 else
20784 BITS := 64
20785 UTS_MACHINE := x86_64
20786+ biarch := $(call cc-option,-m64)
20787 CHECKFLAGS += -D__x86_64__ -m64
20788
20789 KBUILD_AFLAGS += -m64
20790@@ -189,3 +190,12 @@ define archhelp
20791 echo ' FDARGS="..." arguments for the booted kernel'
20792 echo ' FDINITRD=file initrd for the booted kernel'
20793 endef
20794+
20795+define OLD_LD
20796+
20797+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20798+*** Please upgrade your binutils to 2.18 or newer
20799+endef
20800+
20801+archprepare:
20802+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20803diff -urNp linux-2.6.32.46/arch/x86/mm/extable.c linux-2.6.32.46/arch/x86/mm/extable.c
20804--- linux-2.6.32.46/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20805+++ linux-2.6.32.46/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20806@@ -1,14 +1,71 @@
20807 #include <linux/module.h>
20808 #include <linux/spinlock.h>
20809+#include <linux/sort.h>
20810 #include <asm/uaccess.h>
20811+#include <asm/pgtable.h>
20812
20813+/*
20814+ * The exception table needs to be sorted so that the binary
20815+ * search that we use to find entries in it works properly.
20816+ * This is used both for the kernel exception table and for
20817+ * the exception tables of modules that get loaded.
20818+ */
20819+static int cmp_ex(const void *a, const void *b)
20820+{
20821+ const struct exception_table_entry *x = a, *y = b;
20822+
20823+ /* avoid overflow */
20824+ if (x->insn > y->insn)
20825+ return 1;
20826+ if (x->insn < y->insn)
20827+ return -1;
20828+ return 0;
20829+}
20830+
20831+static void swap_ex(void *a, void *b, int size)
20832+{
20833+ struct exception_table_entry t, *x = a, *y = b;
20834+
20835+ t = *x;
20836+
20837+ pax_open_kernel();
20838+ *x = *y;
20839+ *y = t;
20840+ pax_close_kernel();
20841+}
20842+
20843+void sort_extable(struct exception_table_entry *start,
20844+ struct exception_table_entry *finish)
20845+{
20846+ sort(start, finish - start, sizeof(struct exception_table_entry),
20847+ cmp_ex, swap_ex);
20848+}
20849+
20850+#ifdef CONFIG_MODULES
20851+/*
20852+ * If the exception table is sorted, any referring to the module init
20853+ * will be at the beginning or the end.
20854+ */
20855+void trim_init_extable(struct module *m)
20856+{
20857+ /*trim the beginning*/
20858+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20859+ m->extable++;
20860+ m->num_exentries--;
20861+ }
20862+ /*trim the end*/
20863+ while (m->num_exentries &&
20864+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20865+ m->num_exentries--;
20866+}
20867+#endif /* CONFIG_MODULES */
20868
20869 int fixup_exception(struct pt_regs *regs)
20870 {
20871 const struct exception_table_entry *fixup;
20872
20873 #ifdef CONFIG_PNPBIOS
20874- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20875+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20876 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20877 extern u32 pnp_bios_is_utter_crap;
20878 pnp_bios_is_utter_crap = 1;
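The cmp_ex() added above deliberately compares the two addresses instead of returning their difference: insn values are full unsigned longs, and a subtraction truncated to int could report the wrong ordering. The same comparator pattern, exercised with qsort() on a toy table (the struct layout here is illustrative, not the kernel's exception_table_entry):

	#include <stdlib.h>
	#include <stdio.h>

	struct demo_ex { unsigned long insn, fixup; };

	static int cmp_ex(const void *a, const void *b)
	{
		const struct demo_ex *x = a, *y = b;

		if (x->insn > y->insn)		/* compare, don't subtract: the */
			return 1;		/* difference may not fit in int */
		if (x->insn < y->insn)
			return -1;
		return 0;
	}

	int main(void)
	{
		struct demo_ex tbl[] = { { 0x30, 3 }, { 0x10, 1 }, { 0x20, 2 } };

		qsort(tbl, sizeof(tbl) / sizeof(tbl[0]), sizeof(tbl[0]), cmp_ex);
		for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
			printf("%#lx -> %lu\n", tbl[i].insn, tbl[i].fixup);
		return 0;
	}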
20879diff -urNp linux-2.6.32.46/arch/x86/mm/fault.c linux-2.6.32.46/arch/x86/mm/fault.c
20880--- linux-2.6.32.46/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20881+++ linux-2.6.32.46/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20882@@ -11,10 +11,19 @@
20883 #include <linux/kprobes.h> /* __kprobes, ... */
20884 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20885 #include <linux/perf_event.h> /* perf_sw_event */
20886+#include <linux/unistd.h>
20887+#include <linux/compiler.h>
20888
20889 #include <asm/traps.h> /* dotraplinkage, ... */
20890 #include <asm/pgalloc.h> /* pgd_*(), ... */
20891 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20892+#include <asm/vsyscall.h>
20893+#include <asm/tlbflush.h>
20894+
20895+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20896+#include <asm/stacktrace.h>
20897+#include "../kernel/dumpstack.h"
20898+#endif
20899
20900 /*
20901 * Page fault error code bits:
20902@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20903 int ret = 0;
20904
20905 /* kprobe_running() needs smp_processor_id() */
20906- if (kprobes_built_in() && !user_mode_vm(regs)) {
20907+ if (kprobes_built_in() && !user_mode(regs)) {
20908 preempt_disable();
20909 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20910 ret = 1;
20911@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20912 return !instr_lo || (instr_lo>>1) == 1;
20913 case 0x00:
20914 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20915- if (probe_kernel_address(instr, opcode))
20916+ if (user_mode(regs)) {
20917+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20918+ return 0;
20919+ } else if (probe_kernel_address(instr, opcode))
20920 return 0;
20921
20922 *prefetch = (instr_lo == 0xF) &&
20923@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20924 while (instr < max_instr) {
20925 unsigned char opcode;
20926
20927- if (probe_kernel_address(instr, opcode))
20928+ if (user_mode(regs)) {
20929+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20930+ break;
20931+ } else if (probe_kernel_address(instr, opcode))
20932 break;
20933
20934 instr++;
20935@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20936 force_sig_info(si_signo, &info, tsk);
20937 }
20938
20939+#ifdef CONFIG_PAX_EMUTRAMP
20940+static int pax_handle_fetch_fault(struct pt_regs *regs);
20941+#endif
20942+
20943+#ifdef CONFIG_PAX_PAGEEXEC
20944+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20945+{
20946+ pgd_t *pgd;
20947+ pud_t *pud;
20948+ pmd_t *pmd;
20949+
20950+ pgd = pgd_offset(mm, address);
20951+ if (!pgd_present(*pgd))
20952+ return NULL;
20953+ pud = pud_offset(pgd, address);
20954+ if (!pud_present(*pud))
20955+ return NULL;
20956+ pmd = pmd_offset(pud, address);
20957+ if (!pmd_present(*pmd))
20958+ return NULL;
20959+ return pmd;
20960+}
20961+#endif
20962+
20963 DEFINE_SPINLOCK(pgd_lock);
20964 LIST_HEAD(pgd_list);
20965
20966@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20967 address += PMD_SIZE) {
20968
20969 unsigned long flags;
20970+
20971+#ifdef CONFIG_PAX_PER_CPU_PGD
20972+ unsigned long cpu;
20973+#else
20974 struct page *page;
20975+#endif
20976
20977 spin_lock_irqsave(&pgd_lock, flags);
20978+
20979+#ifdef CONFIG_PAX_PER_CPU_PGD
20980+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20981+ pgd_t *pgd = get_cpu_pgd(cpu);
20982+#else
20983 list_for_each_entry(page, &pgd_list, lru) {
20984- if (!vmalloc_sync_one(page_address(page), address))
20985+ pgd_t *pgd = page_address(page);
20986+#endif
20987+
20988+ if (!vmalloc_sync_one(pgd, address))
20989 break;
20990 }
20991 spin_unlock_irqrestore(&pgd_lock, flags);
20992@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20993 * an interrupt in the middle of a task switch..
20994 */
20995 pgd_paddr = read_cr3();
20996+
20997+#ifdef CONFIG_PAX_PER_CPU_PGD
20998+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20999+#endif
21000+
21001 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21002 if (!pmd_k)
21003 return -1;
21004@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
21005
21006 const pgd_t *pgd_ref = pgd_offset_k(address);
21007 unsigned long flags;
21008+
21009+#ifdef CONFIG_PAX_PER_CPU_PGD
21010+ unsigned long cpu;
21011+#else
21012 struct page *page;
21013+#endif
21014
21015 if (pgd_none(*pgd_ref))
21016 continue;
21017
21018 spin_lock_irqsave(&pgd_lock, flags);
21019+
21020+#ifdef CONFIG_PAX_PER_CPU_PGD
21021+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21022+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
21023+#else
21024 list_for_each_entry(page, &pgd_list, lru) {
21025 pgd_t *pgd;
21026 pgd = (pgd_t *)page_address(page) + pgd_index(address);
21027+#endif
21028+
21029 if (pgd_none(*pgd))
21030 set_pgd(pgd, *pgd_ref);
21031 else
21032@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
21033 * happen within a race in page table update. In the later
21034 * case just flush:
21035 */
21036+
21037+#ifdef CONFIG_PAX_PER_CPU_PGD
21038+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21039+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21040+#else
21041 pgd = pgd_offset(current->active_mm, address);
21042+#endif
21043+
21044 pgd_ref = pgd_offset_k(address);
21045 if (pgd_none(*pgd_ref))
21046 return -1;
21047@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
21048 static int is_errata100(struct pt_regs *regs, unsigned long address)
21049 {
21050 #ifdef CONFIG_X86_64
21051- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21052+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21053 return 1;
21054 #endif
21055 return 0;
21056@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
21057 }
21058
21059 static const char nx_warning[] = KERN_CRIT
21060-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21061+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21062
21063 static void
21064 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21065@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21066 if (!oops_may_print())
21067 return;
21068
21069- if (error_code & PF_INSTR) {
21070+ if (nx_enabled && (error_code & PF_INSTR)) {
21071 unsigned int level;
21072
21073 pte_t *pte = lookup_address(address, &level);
21074
21075 if (pte && pte_present(*pte) && !pte_exec(*pte))
21076- printk(nx_warning, current_uid());
21077+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21078 }
21079
21080+#ifdef CONFIG_PAX_KERNEXEC
21081+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21082+ if (current->signal->curr_ip)
21083+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21084+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21085+ else
21086+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21087+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21088+ }
21089+#endif
21090+
21091 printk(KERN_ALERT "BUG: unable to handle kernel ");
21092 if (address < PAGE_SIZE)
21093 printk(KERN_CONT "NULL pointer dereference");
21094@@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21095 unsigned long address, int si_code)
21096 {
21097 struct task_struct *tsk = current;
21098+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21099+ struct mm_struct *mm = tsk->mm;
21100+#endif
21101+
21102+#ifdef CONFIG_X86_64
21103+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21104+ if (regs->ip == (unsigned long)vgettimeofday) {
21105+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21106+ return;
21107+ } else if (regs->ip == (unsigned long)vtime) {
21108+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21109+ return;
21110+ } else if (regs->ip == (unsigned long)vgetcpu) {
21111+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21112+ return;
21113+ }
21114+ }
21115+#endif
21116+
21117+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21118+ if (mm && (error_code & PF_USER)) {
21119+ unsigned long ip = regs->ip;
21120+
21121+ if (v8086_mode(regs))
21122+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21123+
21124+ /*
21125+ * It's possible to have interrupts off here:
21126+ */
21127+ local_irq_enable();
21128+
21129+#ifdef CONFIG_PAX_PAGEEXEC
21130+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21131+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21132+
21133+#ifdef CONFIG_PAX_EMUTRAMP
21134+ switch (pax_handle_fetch_fault(regs)) {
21135+ case 2:
21136+ return;
21137+ }
21138+#endif
21139+
21140+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21141+ do_group_exit(SIGKILL);
21142+ }
21143+#endif
21144+
21145+#ifdef CONFIG_PAX_SEGMEXEC
21146+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21147+
21148+#ifdef CONFIG_PAX_EMUTRAMP
21149+ switch (pax_handle_fetch_fault(regs)) {
21150+ case 2:
21151+ return;
21152+ }
21153+#endif
21154+
21155+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21156+ do_group_exit(SIGKILL);
21157+ }
21158+#endif
21159+
21160+ }
21161+#endif
21162
21163 /* User mode accesses just cause a SIGSEGV */
21164 if (error_code & PF_USER) {
21165@@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21166 return 1;
21167 }
21168
21169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21170+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21171+{
21172+ pte_t *pte;
21173+ pmd_t *pmd;
21174+ spinlock_t *ptl;
21175+ unsigned char pte_mask;
21176+
21177+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21178+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21179+ return 0;
21180+
21181+ /* PaX: it's our fault, let's handle it if we can */
21182+
21183+ /* PaX: take a look at read faults before acquiring any locks */
21184+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21185+ /* instruction fetch attempt from a protected page in user mode */
21186+ up_read(&mm->mmap_sem);
21187+
21188+#ifdef CONFIG_PAX_EMUTRAMP
21189+ switch (pax_handle_fetch_fault(regs)) {
21190+ case 2:
21191+ return 1;
21192+ }
21193+#endif
21194+
21195+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21196+ do_group_exit(SIGKILL);
21197+ }
21198+
21199+ pmd = pax_get_pmd(mm, address);
21200+ if (unlikely(!pmd))
21201+ return 0;
21202+
21203+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21204+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21205+ pte_unmap_unlock(pte, ptl);
21206+ return 0;
21207+ }
21208+
21209+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21210+ /* write attempt to a protected page in user mode */
21211+ pte_unmap_unlock(pte, ptl);
21212+ return 0;
21213+ }
21214+
21215+#ifdef CONFIG_SMP
21216+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21217+#else
21218+ if (likely(address > get_limit(regs->cs)))
21219+#endif
21220+ {
21221+ set_pte(pte, pte_mkread(*pte));
21222+ __flush_tlb_one(address);
21223+ pte_unmap_unlock(pte, ptl);
21224+ up_read(&mm->mmap_sem);
21225+ return 1;
21226+ }
21227+
21228+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21229+
21230+ /*
21231+ * PaX: fill DTLB with user rights and retry
21232+ */
21233+ __asm__ __volatile__ (
21234+ "orb %2,(%1)\n"
21235+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21236+/*
21237+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21238+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21239+ * page fault when examined during a TLB load attempt. this is true not only
21240+ * for PTEs holding a non-present entry but also present entries that will
21241+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21242+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21243+ * for our target pages since their PTEs are simply not in the TLBs at all.
21244+
21245+ * the best thing in omitting it is that we gain around 15-20% speed in the
21246+ * fast path of the page fault handler and can get rid of tracing since we
21247+ * can no longer flush unintended entries.
21248+ */
21249+ "invlpg (%0)\n"
21250+#endif
21251+ __copyuser_seg"testb $0,(%0)\n"
21252+ "xorb %3,(%1)\n"
21253+ :
21254+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21255+ : "memory", "cc");
21256+ pte_unmap_unlock(pte, ptl);
21257+ up_read(&mm->mmap_sem);
21258+ return 1;
21259+}
21260+#endif
21261+
21262 /*
21263 * Handle a spurious fault caused by a stale TLB entry.
21264 *
21265@@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21266 static inline int
21267 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21268 {
21269+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21270+ return 1;
21271+
21272 if (write) {
21273 /* write, present and write, not present: */
21274 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21275@@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21276 {
21277 struct vm_area_struct *vma;
21278 struct task_struct *tsk;
21279- unsigned long address;
21280 struct mm_struct *mm;
21281 int write;
21282 int fault;
21283
21284+ /* Get the faulting address: */
21285+ unsigned long address = read_cr2();
21286+
21287+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21288+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21289+ if (!search_exception_tables(regs->ip)) {
21290+ bad_area_nosemaphore(regs, error_code, address);
21291+ return;
21292+ }
21293+ if (address < PAX_USER_SHADOW_BASE) {
21294+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21295+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21296+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21297+ } else
21298+ address -= PAX_USER_SHADOW_BASE;
21299+ }
21300+#endif
21301+
21302 tsk = current;
21303 mm = tsk->mm;
21304
21305- /* Get the faulting address: */
21306- address = read_cr2();
21307-
21308 /*
21309 * Detect and handle instructions that would cause a page fault for
21310 * both a tracked kernel page and a userspace page.
21311@@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21312 * User-mode registers count as a user access even for any
21313 * potential system fault or CPU buglet:
21314 */
21315- if (user_mode_vm(regs)) {
21316+ if (user_mode(regs)) {
21317 local_irq_enable();
21318 error_code |= PF_USER;
21319 } else {
21320@@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21321 might_sleep();
21322 }
21323
21324+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21325+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21326+ return;
21327+#endif
21328+
21329 vma = find_vma(mm, address);
21330 if (unlikely(!vma)) {
21331 bad_area(regs, error_code, address);
21332@@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21333 bad_area(regs, error_code, address);
21334 return;
21335 }
21336- if (error_code & PF_USER) {
21337- /*
21338- * Accessing the stack below %sp is always a bug.
21339- * The large cushion allows instructions like enter
21340- * and pusha to work. ("enter $65535, $31" pushes
21341- * 32 pointers and then decrements %sp by 65535.)
21342- */
21343- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21344- bad_area(regs, error_code, address);
21345- return;
21346- }
21347+ /*
21348+ * Accessing the stack below %sp is always a bug.
21349+ * The large cushion allows instructions like enter
21350+ * and pusha to work. ("enter $65535, $31" pushes
21351+ * 32 pointers and then decrements %sp by 65535.)
21352+ */
21353+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21354+ bad_area(regs, error_code, address);
21355+ return;
21356 }
21357+
21358+#ifdef CONFIG_PAX_SEGMEXEC
21359+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21360+ bad_area(regs, error_code, address);
21361+ return;
21362+ }
21363+#endif
21364+
21365 if (unlikely(expand_stack(vma, address))) {
21366 bad_area(regs, error_code, address);
21367 return;
21368@@ -1146,3 +1418,199 @@ good_area:
21369
21370 up_read(&mm->mmap_sem);
21371 }
21372+
21373+#ifdef CONFIG_PAX_EMUTRAMP
21374+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21375+{
21376+ int err;
21377+
21378+ do { /* PaX: gcc trampoline emulation #1 */
21379+ unsigned char mov1, mov2;
21380+ unsigned short jmp;
21381+ unsigned int addr1, addr2;
21382+
21383+#ifdef CONFIG_X86_64
21384+ if ((regs->ip + 11) >> 32)
21385+ break;
21386+#endif
21387+
21388+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21389+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21390+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21391+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21392+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21393+
21394+ if (err)
21395+ break;
21396+
21397+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21398+ regs->cx = addr1;
21399+ regs->ax = addr2;
21400+ regs->ip = addr2;
21401+ return 2;
21402+ }
21403+ } while (0);
21404+
21405+ do { /* PaX: gcc trampoline emulation #2 */
21406+ unsigned char mov, jmp;
21407+ unsigned int addr1, addr2;
21408+
21409+#ifdef CONFIG_X86_64
21410+ if ((regs->ip + 9) >> 32)
21411+ break;
21412+#endif
21413+
21414+ err = get_user(mov, (unsigned char __user *)regs->ip);
21415+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21416+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21417+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21418+
21419+ if (err)
21420+ break;
21421+
21422+ if (mov == 0xB9 && jmp == 0xE9) {
21423+ regs->cx = addr1;
21424+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21425+ return 2;
21426+ }
21427+ } while (0);
21428+
21429+ return 1; /* PaX in action */
21430+}
21431+
21432+#ifdef CONFIG_X86_64
21433+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21434+{
21435+ int err;
21436+
21437+ do { /* PaX: gcc trampoline emulation #1 */
21438+ unsigned short mov1, mov2, jmp1;
21439+ unsigned char jmp2;
21440+ unsigned int addr1;
21441+ unsigned long addr2;
21442+
21443+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21444+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21445+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21446+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21447+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21448+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21449+
21450+ if (err)
21451+ break;
21452+
21453+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21454+ regs->r11 = addr1;
21455+ regs->r10 = addr2;
21456+ regs->ip = addr1;
21457+ return 2;
21458+ }
21459+ } while (0);
21460+
21461+ do { /* PaX: gcc trampoline emulation #2 */
21462+ unsigned short mov1, mov2, jmp1;
21463+ unsigned char jmp2;
21464+ unsigned long addr1, addr2;
21465+
21466+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21467+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21468+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21469+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21470+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21471+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21472+
21473+ if (err)
21474+ break;
21475+
21476+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21477+ regs->r11 = addr1;
21478+ regs->r10 = addr2;
21479+ regs->ip = addr1;
21480+ return 2;
21481+ }
21482+ } while (0);
21483+
21484+ return 1; /* PaX in action */
21485+}
21486+#endif
21487+
21488+/*
21489+ * PaX: decide what to do with offenders (regs->ip = fault address)
21490+ *
21491+ * returns 1 when task should be killed
21492+ * 2 when gcc trampoline was detected
21493+ */
21494+static int pax_handle_fetch_fault(struct pt_regs *regs)
21495+{
21496+ if (v8086_mode(regs))
21497+ return 1;
21498+
21499+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21500+ return 1;
21501+
21502+#ifdef CONFIG_X86_32
21503+ return pax_handle_fetch_fault_32(regs);
21504+#else
21505+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21506+ return pax_handle_fetch_fault_32(regs);
21507+ else
21508+ return pax_handle_fetch_fault_64(regs);
21509+#endif
21510+}
21511+#endif
21512+
21513+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21514+void pax_report_insns(void *pc, void *sp)
21515+{
21516+ long i;
21517+
21518+ printk(KERN_ERR "PAX: bytes at PC: ");
21519+ for (i = 0; i < 20; i++) {
21520+ unsigned char c;
21521+ if (get_user(c, (__force unsigned char __user *)pc+i))
21522+ printk(KERN_CONT "?? ");
21523+ else
21524+ printk(KERN_CONT "%02x ", c);
21525+ }
21526+ printk("\n");
21527+
21528+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21529+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21530+ unsigned long c;
21531+ if (get_user(c, (__force unsigned long __user *)sp+i))
21532+#ifdef CONFIG_X86_32
21533+ printk(KERN_CONT "???????? ");
21534+#else
21535+ printk(KERN_CONT "???????????????? ");
21536+#endif
21537+ else
21538+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21539+ }
21540+ printk("\n");
21541+}
21542+#endif
21543+
21544+/**
21545+ * probe_kernel_write(): safely attempt to write to a location
21546+ * @dst: address to write to
21547+ * @src: pointer to the data that shall be written
21548+ * @size: size of the data chunk
21549+ *
21550+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21551+ * happens, handle that and return -EFAULT.
21552+ */
21553+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21554+{
21555+ long ret;
21556+ mm_segment_t old_fs = get_fs();
21557+
21558+ set_fs(KERNEL_DS);
21559+ pagefault_disable();
21560+ pax_open_kernel();
21561+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21562+ pax_close_kernel();
21563+ pagefault_enable();
21564+ set_fs(old_fs);
21565+
21566+ return ret ? -EFAULT : 0;
21567+}
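
Illustrative userland sketch, not part of the patch: taking the address of a GNU C nested function makes gcc build a small trampoline on the stack, and a non-executable stack turns the first instruction fetch from it into exactly the fault that pax_handle_fetch_fault_32()/_64() above recognise and emulate. The byte layouts in the comment are taken from the checks in the hunk (0xB9/0xB8/FF E0, or 0xB9/0xE9); which of the two shapes a given gcc version emits is an assumption here, not something the patch states.

/*
 * Sketch only: the kind of code whose stack trampoline the EMUTRAMP
 * handler above pattern-matches instead of executing.
 */
#include <stdio.h>

static void run(void (*cb)(int), int arg)
{
	cb(arg);			/* indirect call lands on the trampoline */
}

int main(void)
{
	int total = 0;

	void add(int x)			/* GNU C nested function */
	{
		total += x;		/* needs the static chain (%ecx / %r10) */
	}

	/*
	 * The emulation matches the trampoline bytes, e.g. for 32-bit:
	 *   b9 imm32   mov $chain,%ecx
	 *   b8 imm32   mov $target,%eax    (or: e9 rel32  jmp target)
	 *   ff e0      jmp *%eax
	 * and then loads the registers and the new ip itself.
	 */
	run(add, 7);
	printf("total = %d\n", total);
	return 0;
}
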
21568diff -urNp linux-2.6.32.46/arch/x86/mm/gup.c linux-2.6.32.46/arch/x86/mm/gup.c
21569--- linux-2.6.32.46/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21570+++ linux-2.6.32.46/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21571@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21572 addr = start;
21573 len = (unsigned long) nr_pages << PAGE_SHIFT;
21574 end = start + len;
21575- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21576+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21577 (void __user *)start, len)))
21578 return 0;
21579
21580diff -urNp linux-2.6.32.46/arch/x86/mm/highmem_32.c linux-2.6.32.46/arch/x86/mm/highmem_32.c
21581--- linux-2.6.32.46/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21582+++ linux-2.6.32.46/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21583@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21584 idx = type + KM_TYPE_NR*smp_processor_id();
21585 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21586 BUG_ON(!pte_none(*(kmap_pte-idx)));
21587+
21588+ pax_open_kernel();
21589 set_pte(kmap_pte-idx, mk_pte(page, prot));
21590+ pax_close_kernel();
21591
21592 return (void *)vaddr;
21593 }
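
The pax_open_kernel()/pax_close_kernel() pair added here recurs in the iomap_32, init_64 and pageattr hunks further down: it brackets writes to mappings that KERNEXEC otherwise keeps read-only. The helpers below are a hypothetical sketch of one common way to open such a window on x86, clearing CR0.WP with preemption disabled; the names and the approach are assumptions, not the patch's actual definitions.

/* Hypothetical sketch only; not the implementation used by this patch. */
static inline unsigned long open_kernel_window(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* supervisor writes bypass R/O PTEs */
	return cr0;
}

static inline void close_kernel_window(unsigned long cr0)
{
	write_cr0(cr0);			/* restore CR0.WP */
	preempt_enable();
}
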
21594diff -urNp linux-2.6.32.46/arch/x86/mm/hugetlbpage.c linux-2.6.32.46/arch/x86/mm/hugetlbpage.c
21595--- linux-2.6.32.46/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21596+++ linux-2.6.32.46/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21597@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21598 struct hstate *h = hstate_file(file);
21599 struct mm_struct *mm = current->mm;
21600 struct vm_area_struct *vma;
21601- unsigned long start_addr;
21602+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21603+
21604+#ifdef CONFIG_PAX_SEGMEXEC
21605+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21606+ pax_task_size = SEGMEXEC_TASK_SIZE;
21607+#endif
21608+
21609+ pax_task_size -= PAGE_SIZE;
21610
21611 if (len > mm->cached_hole_size) {
21612- start_addr = mm->free_area_cache;
21613+ start_addr = mm->free_area_cache;
21614 } else {
21615- start_addr = TASK_UNMAPPED_BASE;
21616- mm->cached_hole_size = 0;
21617+ start_addr = mm->mmap_base;
21618+ mm->cached_hole_size = 0;
21619 }
21620
21621 full_search:
21622@@ -281,26 +288,27 @@ full_search:
21623
21624 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21625 /* At this point: (!vma || addr < vma->vm_end). */
21626- if (TASK_SIZE - len < addr) {
21627+ if (pax_task_size - len < addr) {
21628 /*
21629 * Start a new search - just in case we missed
21630 * some holes.
21631 */
21632- if (start_addr != TASK_UNMAPPED_BASE) {
21633- start_addr = TASK_UNMAPPED_BASE;
21634+ if (start_addr != mm->mmap_base) {
21635+ start_addr = mm->mmap_base;
21636 mm->cached_hole_size = 0;
21637 goto full_search;
21638 }
21639 return -ENOMEM;
21640 }
21641- if (!vma || addr + len <= vma->vm_start) {
21642- mm->free_area_cache = addr + len;
21643- return addr;
21644- }
21645+ if (check_heap_stack_gap(vma, addr, len))
21646+ break;
21647 if (addr + mm->cached_hole_size < vma->vm_start)
21648 mm->cached_hole_size = vma->vm_start - addr;
21649 addr = ALIGN(vma->vm_end, huge_page_size(h));
21650 }
21651+
21652+ mm->free_area_cache = addr + len;
21653+ return addr;
21654 }
21655
21656 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21657@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21658 {
21659 struct hstate *h = hstate_file(file);
21660 struct mm_struct *mm = current->mm;
21661- struct vm_area_struct *vma, *prev_vma;
21662- unsigned long base = mm->mmap_base, addr = addr0;
21663+ struct vm_area_struct *vma;
21664+ unsigned long base = mm->mmap_base, addr;
21665 unsigned long largest_hole = mm->cached_hole_size;
21666- int first_time = 1;
21667
21668 /* don't allow allocations above current base */
21669 if (mm->free_area_cache > base)
21670@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21671 largest_hole = 0;
21672 mm->free_area_cache = base;
21673 }
21674-try_again:
21675+
21676 /* make sure it can fit in the remaining address space */
21677 if (mm->free_area_cache < len)
21678 goto fail;
21679
21680 /* either no address requested or cant fit in requested address hole */
21681- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21682+ addr = (mm->free_area_cache - len);
21683 do {
21684+ addr &= huge_page_mask(h);
21685+ vma = find_vma(mm, addr);
21686 /*
21687 * Lookup failure means no vma is above this address,
21688 * i.e. return with success:
21689- */
21690- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21691- return addr;
21692-
21693- /*
21694 * new region fits between prev_vma->vm_end and
21695 * vma->vm_start, use it:
21696 */
21697- if (addr + len <= vma->vm_start &&
21698- (!prev_vma || (addr >= prev_vma->vm_end))) {
21699+ if (check_heap_stack_gap(vma, addr, len)) {
21700 /* remember the address as a hint for next time */
21701- mm->cached_hole_size = largest_hole;
21702- return (mm->free_area_cache = addr);
21703- } else {
21704- /* pull free_area_cache down to the first hole */
21705- if (mm->free_area_cache == vma->vm_end) {
21706- mm->free_area_cache = vma->vm_start;
21707- mm->cached_hole_size = largest_hole;
21708- }
21709+ mm->cached_hole_size = largest_hole;
21710+ return (mm->free_area_cache = addr);
21711+ }
21712+ /* pull free_area_cache down to the first hole */
21713+ if (mm->free_area_cache == vma->vm_end) {
21714+ mm->free_area_cache = vma->vm_start;
21715+ mm->cached_hole_size = largest_hole;
21716 }
21717
21718 /* remember the largest hole we saw so far */
21719 if (addr + largest_hole < vma->vm_start)
21720- largest_hole = vma->vm_start - addr;
21721+ largest_hole = vma->vm_start - addr;
21722
21723 /* try just below the current vma->vm_start */
21724- addr = (vma->vm_start - len) & huge_page_mask(h);
21725- } while (len <= vma->vm_start);
21726+ addr = skip_heap_stack_gap(vma, len);
21727+ } while (!IS_ERR_VALUE(addr));
21728
21729 fail:
21730 /*
21731- * if hint left us with no space for the requested
21732- * mapping then try again:
21733- */
21734- if (first_time) {
21735- mm->free_area_cache = base;
21736- largest_hole = 0;
21737- first_time = 0;
21738- goto try_again;
21739- }
21740- /*
21741 * A failed mmap() very likely causes application failure,
21742 * so fall back to the bottom-up function here. This scenario
21743 * can happen with large stack limits and large mmap()
21744 * allocations.
21745 */
21746- mm->free_area_cache = TASK_UNMAPPED_BASE;
21747+
21748+#ifdef CONFIG_PAX_SEGMEXEC
21749+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21750+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21751+ else
21752+#endif
21753+
21754+ mm->mmap_base = TASK_UNMAPPED_BASE;
21755+
21756+#ifdef CONFIG_PAX_RANDMMAP
21757+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21758+ mm->mmap_base += mm->delta_mmap;
21759+#endif
21760+
21761+ mm->free_area_cache = mm->mmap_base;
21762 mm->cached_hole_size = ~0UL;
21763 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21764 len, pgoff, flags);
21765@@ -387,6 +393,7 @@ fail:
21766 /*
21767 * Restore the topdown base:
21768 */
21769+ mm->mmap_base = base;
21770 mm->free_area_cache = base;
21771 mm->cached_hole_size = ~0UL;
21772
21773@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21774 struct hstate *h = hstate_file(file);
21775 struct mm_struct *mm = current->mm;
21776 struct vm_area_struct *vma;
21777+ unsigned long pax_task_size = TASK_SIZE;
21778
21779 if (len & ~huge_page_mask(h))
21780 return -EINVAL;
21781- if (len > TASK_SIZE)
21782+
21783+#ifdef CONFIG_PAX_SEGMEXEC
21784+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21785+ pax_task_size = SEGMEXEC_TASK_SIZE;
21786+#endif
21787+
21788+ pax_task_size -= PAGE_SIZE;
21789+
21790+ if (len > pax_task_size)
21791 return -ENOMEM;
21792
21793 if (flags & MAP_FIXED) {
21794@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21795 if (addr) {
21796 addr = ALIGN(addr, huge_page_size(h));
21797 vma = find_vma(mm, addr);
21798- if (TASK_SIZE - len >= addr &&
21799- (!vma || addr + len <= vma->vm_start))
21800+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21801 return addr;
21802 }
21803 if (mm->get_unmapped_area == arch_get_unmapped_area)
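
Both get_unmapped_area paths in this hunk swap the open-coded test "!vma || addr + len <= vma->vm_start" for check_heap_stack_gap(), whose definition lives elsewhere in the patch. The helper below is only a plausible sketch of the intent: accept a candidate range when it does not run into the following vma and, when that vma is a downward-growing stack, keep an extra guard gap below it. The sysctl_heap_stack_gap name is an assumption, not quoted from this section.

/* Plausible sketch only; the real helper is defined elsewhere in the patch. */
static inline int heap_stack_gap_ok(const struct vm_area_struct *vma,
				    unsigned long addr, unsigned long len)
{
	if (!vma)
		return 1;			/* nothing above the candidate */
	if (addr + len > vma->vm_start)
		return 0;			/* would overlap the next vma */
	if (vma->vm_flags & VM_GROWSDOWN)	/* stack: demand a guard gap */
		return vma->vm_start - (addr + len) >= sysctl_heap_stack_gap;
	return 1;
}
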
21804diff -urNp linux-2.6.32.46/arch/x86/mm/init_32.c linux-2.6.32.46/arch/x86/mm/init_32.c
21805--- linux-2.6.32.46/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21806+++ linux-2.6.32.46/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21807@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21808 }
21809
21810 /*
21811- * Creates a middle page table and puts a pointer to it in the
21812- * given global directory entry. This only returns the gd entry
21813- * in non-PAE compilation mode, since the middle layer is folded.
21814- */
21815-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21816-{
21817- pud_t *pud;
21818- pmd_t *pmd_table;
21819-
21820-#ifdef CONFIG_X86_PAE
21821- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21822- if (after_bootmem)
21823- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21824- else
21825- pmd_table = (pmd_t *)alloc_low_page();
21826- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21827- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21828- pud = pud_offset(pgd, 0);
21829- BUG_ON(pmd_table != pmd_offset(pud, 0));
21830-
21831- return pmd_table;
21832- }
21833-#endif
21834- pud = pud_offset(pgd, 0);
21835- pmd_table = pmd_offset(pud, 0);
21836-
21837- return pmd_table;
21838-}
21839-
21840-/*
21841 * Create a page table and place a pointer to it in a middle page
21842 * directory entry:
21843 */
21844@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21845 page_table = (pte_t *)alloc_low_page();
21846
21847 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21848+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21849+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21850+#else
21851 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21852+#endif
21853 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21854 }
21855
21856 return pte_offset_kernel(pmd, 0);
21857 }
21858
21859+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21860+{
21861+ pud_t *pud;
21862+ pmd_t *pmd_table;
21863+
21864+ pud = pud_offset(pgd, 0);
21865+ pmd_table = pmd_offset(pud, 0);
21866+
21867+ return pmd_table;
21868+}
21869+
21870 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21871 {
21872 int pgd_idx = pgd_index(vaddr);
21873@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21874 int pgd_idx, pmd_idx;
21875 unsigned long vaddr;
21876 pgd_t *pgd;
21877+ pud_t *pud;
21878 pmd_t *pmd;
21879 pte_t *pte = NULL;
21880
21881@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21882 pgd = pgd_base + pgd_idx;
21883
21884 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21885- pmd = one_md_table_init(pgd);
21886- pmd = pmd + pmd_index(vaddr);
21887+ pud = pud_offset(pgd, vaddr);
21888+ pmd = pmd_offset(pud, vaddr);
21889+
21890+#ifdef CONFIG_X86_PAE
21891+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21892+#endif
21893+
21894 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21895 pmd++, pmd_idx++) {
21896 pte = page_table_kmap_check(one_page_table_init(pmd),
21897@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21898 }
21899 }
21900
21901-static inline int is_kernel_text(unsigned long addr)
21902+static inline int is_kernel_text(unsigned long start, unsigned long end)
21903 {
21904- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21905- return 1;
21906- return 0;
21907+ if ((start > ktla_ktva((unsigned long)_etext) ||
21908+ end <= ktla_ktva((unsigned long)_stext)) &&
21909+ (start > ktla_ktva((unsigned long)_einittext) ||
21910+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21911+
21912+#ifdef CONFIG_ACPI_SLEEP
21913+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21914+#endif
21915+
21916+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21917+ return 0;
21918+ return 1;
21919 }
21920
21921 /*
21922@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21923 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21924 unsigned long start_pfn, end_pfn;
21925 pgd_t *pgd_base = swapper_pg_dir;
21926- int pgd_idx, pmd_idx, pte_ofs;
21927+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21928 unsigned long pfn;
21929 pgd_t *pgd;
21930+ pud_t *pud;
21931 pmd_t *pmd;
21932 pte_t *pte;
21933 unsigned pages_2m, pages_4k;
21934@@ -278,8 +279,13 @@ repeat:
21935 pfn = start_pfn;
21936 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21937 pgd = pgd_base + pgd_idx;
21938- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21939- pmd = one_md_table_init(pgd);
21940+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21941+ pud = pud_offset(pgd, 0);
21942+ pmd = pmd_offset(pud, 0);
21943+
21944+#ifdef CONFIG_X86_PAE
21945+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21946+#endif
21947
21948 if (pfn >= end_pfn)
21949 continue;
21950@@ -291,14 +297,13 @@ repeat:
21951 #endif
21952 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21953 pmd++, pmd_idx++) {
21954- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21955+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21956
21957 /*
21958 * Map with big pages if possible, otherwise
21959 * create normal page tables:
21960 */
21961 if (use_pse) {
21962- unsigned int addr2;
21963 pgprot_t prot = PAGE_KERNEL_LARGE;
21964 /*
21965 * first pass will use the same initial
21966@@ -308,11 +313,7 @@ repeat:
21967 __pgprot(PTE_IDENT_ATTR |
21968 _PAGE_PSE);
21969
21970- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21971- PAGE_OFFSET + PAGE_SIZE-1;
21972-
21973- if (is_kernel_text(addr) ||
21974- is_kernel_text(addr2))
21975+ if (is_kernel_text(address, address + PMD_SIZE))
21976 prot = PAGE_KERNEL_LARGE_EXEC;
21977
21978 pages_2m++;
21979@@ -329,7 +330,7 @@ repeat:
21980 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21981 pte += pte_ofs;
21982 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21983- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21984+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21985 pgprot_t prot = PAGE_KERNEL;
21986 /*
21987 * first pass will use the same initial
21988@@ -337,7 +338,7 @@ repeat:
21989 */
21990 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21991
21992- if (is_kernel_text(addr))
21993+ if (is_kernel_text(address, address + PAGE_SIZE))
21994 prot = PAGE_KERNEL_EXEC;
21995
21996 pages_4k++;
21997@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21998
21999 pud = pud_offset(pgd, va);
22000 pmd = pmd_offset(pud, va);
22001- if (!pmd_present(*pmd))
22002+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22003 break;
22004
22005 pte = pte_offset_kernel(pmd, va);
22006@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
22007
22008 static void __init pagetable_init(void)
22009 {
22010- pgd_t *pgd_base = swapper_pg_dir;
22011-
22012- permanent_kmaps_init(pgd_base);
22013+ permanent_kmaps_init(swapper_pg_dir);
22014 }
22015
22016 #ifdef CONFIG_ACPI_SLEEP
22017@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
22018 * ACPI suspend needs this for resume, because things like the intel-agp
22019 * driver might have split up a kernel 4MB mapping.
22020 */
22021-char swsusp_pg_dir[PAGE_SIZE]
22022+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
22023 __attribute__ ((aligned(PAGE_SIZE)));
22024
22025 static inline void save_pg_dir(void)
22026 {
22027- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
22028+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
22029 }
22030 #else /* !CONFIG_ACPI_SLEEP */
22031 static inline void save_pg_dir(void)
22032@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
22033 flush_tlb_all();
22034 }
22035
22036-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22037+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22038 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22039
22040 /* user-defined highmem size */
22041@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
22042 * Initialize the boot-time allocator (with low memory only):
22043 */
22044 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
22045- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22046+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
22047 PAGE_SIZE);
22048 if (bootmap == -1L)
22049 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
22050@@ -864,6 +863,12 @@ void __init mem_init(void)
22051
22052 pci_iommu_alloc();
22053
22054+#ifdef CONFIG_PAX_PER_CPU_PGD
22055+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22056+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22057+ KERNEL_PGD_PTRS);
22058+#endif
22059+
22060 #ifdef CONFIG_FLATMEM
22061 BUG_ON(!mem_map);
22062 #endif
22063@@ -881,7 +886,7 @@ void __init mem_init(void)
22064 set_highmem_pages_init();
22065
22066 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22067- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22068+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22069 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22070
22071 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22072@@ -923,10 +928,10 @@ void __init mem_init(void)
22073 ((unsigned long)&__init_end -
22074 (unsigned long)&__init_begin) >> 10,
22075
22076- (unsigned long)&_etext, (unsigned long)&_edata,
22077- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22078+ (unsigned long)&_sdata, (unsigned long)&_edata,
22079+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22080
22081- (unsigned long)&_text, (unsigned long)&_etext,
22082+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22083 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22084
22085 /*
22086@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22087 if (!kernel_set_to_readonly)
22088 return;
22089
22090+ start = ktla_ktva(start);
22091 pr_debug("Set kernel text: %lx - %lx for read write\n",
22092 start, start+size);
22093
22094@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22095 if (!kernel_set_to_readonly)
22096 return;
22097
22098+ start = ktla_ktva(start);
22099 pr_debug("Set kernel text: %lx - %lx for read only\n",
22100 start, start+size);
22101
22102@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22103 unsigned long start = PFN_ALIGN(_text);
22104 unsigned long size = PFN_ALIGN(_etext) - start;
22105
22106+ start = ktla_ktva(start);
22107 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22108 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22109 size >> 10);
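
The rewritten is_kernel_text() in this hunk takes a [start, end) range and answers with a chain of range-overlap tests against the kernel text, the init text, the ACPI wakeup area and the low BIOS/video window. A minimal sketch of the idiom each clause repeats, with a hypothetical helper name and the same deliberately inclusive comparison the patch uses:

/* Sketch only: mirrors the comparison style of is_kernel_text() above. */
static inline int ranges_overlap(unsigned long start, unsigned long end,
				 unsigned long lo, unsigned long hi)
{
	/*
	 * [start, end) is disjoint from the protected range only if it ends
	 * at or below lo, or starts above hi; anything else is treated as
	 * kernel text and may be mapped executable.
	 */
	return !(start > hi || end <= lo);
}
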
22110diff -urNp linux-2.6.32.46/arch/x86/mm/init_64.c linux-2.6.32.46/arch/x86/mm/init_64.c
22111--- linux-2.6.32.46/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
22112+++ linux-2.6.32.46/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
22113@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22114 pmd = fill_pmd(pud, vaddr);
22115 pte = fill_pte(pmd, vaddr);
22116
22117+ pax_open_kernel();
22118 set_pte(pte, new_pte);
22119+ pax_close_kernel();
22120
22121 /*
22122 * It's enough to flush this one mapping.
22123@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22124 pgd = pgd_offset_k((unsigned long)__va(phys));
22125 if (pgd_none(*pgd)) {
22126 pud = (pud_t *) spp_getpage();
22127- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22128- _PAGE_USER));
22129+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22130 }
22131 pud = pud_offset(pgd, (unsigned long)__va(phys));
22132 if (pud_none(*pud)) {
22133 pmd = (pmd_t *) spp_getpage();
22134- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22135- _PAGE_USER));
22136+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22137 }
22138 pmd = pmd_offset(pud, phys);
22139 BUG_ON(!pmd_none(*pmd));
22140@@ -675,6 +675,12 @@ void __init mem_init(void)
22141
22142 pci_iommu_alloc();
22143
22144+#ifdef CONFIG_PAX_PER_CPU_PGD
22145+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22146+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22147+ KERNEL_PGD_PTRS);
22148+#endif
22149+
22150 /* clear_bss() already clear the empty_zero_page */
22151
22152 reservedpages = 0;
22153@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22154 static struct vm_area_struct gate_vma = {
22155 .vm_start = VSYSCALL_START,
22156 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22157- .vm_page_prot = PAGE_READONLY_EXEC,
22158- .vm_flags = VM_READ | VM_EXEC
22159+ .vm_page_prot = PAGE_READONLY,
22160+ .vm_flags = VM_READ
22161 };
22162
22163 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22164@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22165
22166 const char *arch_vma_name(struct vm_area_struct *vma)
22167 {
22168- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22169+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22170 return "[vdso]";
22171 if (vma == &gate_vma)
22172 return "[vsyscall]";
22173diff -urNp linux-2.6.32.46/arch/x86/mm/init.c linux-2.6.32.46/arch/x86/mm/init.c
22174--- linux-2.6.32.46/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
22175+++ linux-2.6.32.46/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
22176@@ -69,11 +69,7 @@ static void __init find_early_table_spac
22177 * cause a hotspot and fill up ZONE_DMA. The page tables
22178 * need roughly 0.5KB per GB.
22179 */
22180-#ifdef CONFIG_X86_32
22181- start = 0x7000;
22182-#else
22183- start = 0x8000;
22184-#endif
22185+ start = 0x100000;
22186 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22187 tables, PAGE_SIZE);
22188 if (e820_table_start == -1UL)
22189@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22190 #endif
22191
22192 set_nx();
22193- if (nx_enabled)
22194+ if (nx_enabled && cpu_has_nx)
22195 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22196
22197 /* Enable PSE if available */
22198@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22199 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22200 * mmio resources as well as potential bios/acpi data regions.
22201 */
22202+
22203 int devmem_is_allowed(unsigned long pagenr)
22204 {
22205+#ifdef CONFIG_GRKERNSEC_KMEM
22206+ /* allow BDA */
22207+ if (!pagenr)
22208+ return 1;
22209+ /* allow EBDA */
22210+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22211+ return 1;
22212+ /* allow ISA/video mem */
22213+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22214+ return 1;
22215+ /* throw out everything else below 1MB */
22216+ if (pagenr <= 256)
22217+ return 0;
22218+#else
22219 if (pagenr <= 256)
22220 return 1;
22221+#endif
22222+
22223 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22224 return 0;
22225 if (!page_is_ram(pagenr))
22226@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22227
22228 void free_initmem(void)
22229 {
22230+
22231+#ifdef CONFIG_PAX_KERNEXEC
22232+#ifdef CONFIG_X86_32
22233+ /* PaX: limit KERNEL_CS to actual size */
22234+ unsigned long addr, limit;
22235+ struct desc_struct d;
22236+ int cpu;
22237+
22238+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22239+ limit = (limit - 1UL) >> PAGE_SHIFT;
22240+
22241+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22242+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22243+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22244+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22245+ }
22246+
22247+ /* PaX: make KERNEL_CS read-only */
22248+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22249+ if (!paravirt_enabled())
22250+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22251+/*
22252+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22253+ pgd = pgd_offset_k(addr);
22254+ pud = pud_offset(pgd, addr);
22255+ pmd = pmd_offset(pud, addr);
22256+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22257+ }
22258+*/
22259+#ifdef CONFIG_X86_PAE
22260+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22261+/*
22262+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22263+ pgd = pgd_offset_k(addr);
22264+ pud = pud_offset(pgd, addr);
22265+ pmd = pmd_offset(pud, addr);
22266+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22267+ }
22268+*/
22269+#endif
22270+
22271+#ifdef CONFIG_MODULES
22272+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22273+#endif
22274+
22275+#else
22276+ pgd_t *pgd;
22277+ pud_t *pud;
22278+ pmd_t *pmd;
22279+ unsigned long addr, end;
22280+
22281+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22282+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22283+ pgd = pgd_offset_k(addr);
22284+ pud = pud_offset(pgd, addr);
22285+ pmd = pmd_offset(pud, addr);
22286+ if (!pmd_present(*pmd))
22287+ continue;
22288+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22289+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22290+ else
22291+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22292+ }
22293+
22294+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22295+ end = addr + KERNEL_IMAGE_SIZE;
22296+ for (; addr < end; addr += PMD_SIZE) {
22297+ pgd = pgd_offset_k(addr);
22298+ pud = pud_offset(pgd, addr);
22299+ pmd = pmd_offset(pud, addr);
22300+ if (!pmd_present(*pmd))
22301+ continue;
22302+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22303+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22304+ }
22305+#endif
22306+
22307+ flush_tlb_all();
22308+#endif
22309+
22310 free_init_pages("unused kernel memory",
22311 (unsigned long)(&__init_begin),
22312 (unsigned long)(&__init_end));
22313diff -urNp linux-2.6.32.46/arch/x86/mm/iomap_32.c linux-2.6.32.46/arch/x86/mm/iomap_32.c
22314--- linux-2.6.32.46/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22315+++ linux-2.6.32.46/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22316@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22317 debug_kmap_atomic(type);
22318 idx = type + KM_TYPE_NR * smp_processor_id();
22319 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22320+
22321+ pax_open_kernel();
22322 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22323+ pax_close_kernel();
22324+
22325 arch_flush_lazy_mmu_mode();
22326
22327 return (void *)vaddr;
22328diff -urNp linux-2.6.32.46/arch/x86/mm/ioremap.c linux-2.6.32.46/arch/x86/mm/ioremap.c
22329--- linux-2.6.32.46/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22330+++ linux-2.6.32.46/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22331@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22332 * Second special case: Some BIOSen report the PC BIOS
22333 * area (640->1Mb) as ram even though it is not.
22334 */
22335- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22336- pagenr < (BIOS_END >> PAGE_SHIFT))
22337+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22338+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22339 return 0;
22340
22341 for (i = 0; i < e820.nr_map; i++) {
22342@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22343 /*
22344 * Don't allow anybody to remap normal RAM that we're using..
22345 */
22346- for (pfn = phys_addr >> PAGE_SHIFT;
22347- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22348- pfn++) {
22349-
22350+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22351 int is_ram = page_is_ram(pfn);
22352
22353- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22354+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22355 return NULL;
22356 WARN_ON_ONCE(is_ram);
22357 }
22358@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22359 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22360
22361 static __initdata int after_paging_init;
22362-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22363+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22364
22365 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22366 {
22367@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22368 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22369
22370 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22371- memset(bm_pte, 0, sizeof(bm_pte));
22372- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22373+ pmd_populate_user(&init_mm, pmd, bm_pte);
22374
22375 /*
22376 * The boot-ioremap range spans multiple pmds, for which
22377diff -urNp linux-2.6.32.46/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.46/arch/x86/mm/kmemcheck/kmemcheck.c
22378--- linux-2.6.32.46/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22379+++ linux-2.6.32.46/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22380@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22381 * memory (e.g. tracked pages)? For now, we need this to avoid
22382 * invoking kmemcheck for PnP BIOS calls.
22383 */
22384- if (regs->flags & X86_VM_MASK)
22385+ if (v8086_mode(regs))
22386 return false;
22387- if (regs->cs != __KERNEL_CS)
22388+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22389 return false;
22390
22391 pte = kmemcheck_pte_lookup(address);
22392diff -urNp linux-2.6.32.46/arch/x86/mm/mmap.c linux-2.6.32.46/arch/x86/mm/mmap.c
22393--- linux-2.6.32.46/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22394+++ linux-2.6.32.46/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22395@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22396 * Leave an at least ~128 MB hole with possible stack randomization.
22397 */
22398 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22399-#define MAX_GAP (TASK_SIZE/6*5)
22400+#define MAX_GAP (pax_task_size/6*5)
22401
22402 /*
22403 * True on X86_32 or when emulating IA32 on X86_64
22404@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22405 return rnd << PAGE_SHIFT;
22406 }
22407
22408-static unsigned long mmap_base(void)
22409+static unsigned long mmap_base(struct mm_struct *mm)
22410 {
22411 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22412+ unsigned long pax_task_size = TASK_SIZE;
22413+
22414+#ifdef CONFIG_PAX_SEGMEXEC
22415+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22416+ pax_task_size = SEGMEXEC_TASK_SIZE;
22417+#endif
22418
22419 if (gap < MIN_GAP)
22420 gap = MIN_GAP;
22421 else if (gap > MAX_GAP)
22422 gap = MAX_GAP;
22423
22424- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22425+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22426 }
22427
22428 /*
22429 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22430 * does, but not when emulating X86_32
22431 */
22432-static unsigned long mmap_legacy_base(void)
22433+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22434 {
22435- if (mmap_is_ia32())
22436+ if (mmap_is_ia32()) {
22437+
22438+#ifdef CONFIG_PAX_SEGMEXEC
22439+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22440+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22441+ else
22442+#endif
22443+
22444 return TASK_UNMAPPED_BASE;
22445- else
22446+ } else
22447 return TASK_UNMAPPED_BASE + mmap_rnd();
22448 }
22449
22450@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22451 void arch_pick_mmap_layout(struct mm_struct *mm)
22452 {
22453 if (mmap_is_legacy()) {
22454- mm->mmap_base = mmap_legacy_base();
22455+ mm->mmap_base = mmap_legacy_base(mm);
22456+
22457+#ifdef CONFIG_PAX_RANDMMAP
22458+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22459+ mm->mmap_base += mm->delta_mmap;
22460+#endif
22461+
22462 mm->get_unmapped_area = arch_get_unmapped_area;
22463 mm->unmap_area = arch_unmap_area;
22464 } else {
22465- mm->mmap_base = mmap_base();
22466+ mm->mmap_base = mmap_base(mm);
22467+
22468+#ifdef CONFIG_PAX_RANDMMAP
22469+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22470+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22471+#endif
22472+
22473 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22474 mm->unmap_area = arch_unmap_area_topdown;
22475 }
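
mmap_base() above clamps the stack gap between MIN_GAP and MAX_GAP and subtracts it, along with the randomisation, from the (possibly SEGMEXEC-reduced) task size. Below is a standalone sketch of that arithmetic; the inputs (3 GB task size, 8 MB stack rlimit, randomisation taken as zero) are illustrative examples, not values from the patch.

#include <stdio.h>

/* Round up to the next 4 KiB boundary, like the kernel's PAGE_ALIGN(). */
#define PAGE_ALIGN(x)	(((x) + 0xfffUL) & ~0xfffUL)

int main(void)
{
	unsigned long task_size = 0xC0000000UL;	/* example: i386 TASK_SIZE */
	unsigned long stack_rlimit = 8UL << 20;	/* example: 8 MB RLIMIT_STACK */
	unsigned long min_gap = 128UL << 20;	/* MIN_GAP, stack_maxrandom_size() taken as zero */
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long rnd = 0;			/* mmap_rnd() treated as zero */
	unsigned long gap = stack_rlimit;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap - rnd));
	return 0;
}
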
22476diff -urNp linux-2.6.32.46/arch/x86/mm/mmio-mod.c linux-2.6.32.46/arch/x86/mm/mmio-mod.c
22477--- linux-2.6.32.46/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22478+++ linux-2.6.32.46/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22479@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22480 break;
22481 default:
22482 {
22483- unsigned char *ip = (unsigned char *)instptr;
22484+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22485 my_trace->opcode = MMIO_UNKNOWN_OP;
22486 my_trace->width = 0;
22487 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22488@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22489 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22490 void __iomem *addr)
22491 {
22492- static atomic_t next_id;
22493+ static atomic_unchecked_t next_id;
22494 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22495 /* These are page-unaligned. */
22496 struct mmiotrace_map map = {
22497@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22498 .private = trace
22499 },
22500 .phys = offset,
22501- .id = atomic_inc_return(&next_id)
22502+ .id = atomic_inc_return_unchecked(&next_id)
22503 };
22504 map.map_id = trace->id;
22505
22506diff -urNp linux-2.6.32.46/arch/x86/mm/numa_32.c linux-2.6.32.46/arch/x86/mm/numa_32.c
22507--- linux-2.6.32.46/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22508+++ linux-2.6.32.46/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22509@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22510 }
22511 #endif
22512
22513-extern unsigned long find_max_low_pfn(void);
22514 extern unsigned long highend_pfn, highstart_pfn;
22515
22516 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22517diff -urNp linux-2.6.32.46/arch/x86/mm/pageattr.c linux-2.6.32.46/arch/x86/mm/pageattr.c
22518--- linux-2.6.32.46/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22519+++ linux-2.6.32.46/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22520@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22521 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22522 */
22523 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22524- pgprot_val(forbidden) |= _PAGE_NX;
22525+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22526
22527 /*
22528 * The kernel text needs to be executable for obvious reasons
22529 * Does not cover __inittext since that is gone later on. On
22530 * 64bit we do not enforce !NX on the low mapping
22531 */
22532- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22533- pgprot_val(forbidden) |= _PAGE_NX;
22534+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22535+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22536
22537+#ifdef CONFIG_DEBUG_RODATA
22538 /*
22539 * The .rodata section needs to be read-only. Using the pfn
22540 * catches all aliases.
22541@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22542 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22543 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22544 pgprot_val(forbidden) |= _PAGE_RW;
22545+#endif
22546+
22547+#ifdef CONFIG_PAX_KERNEXEC
22548+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22549+ pgprot_val(forbidden) |= _PAGE_RW;
22550+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22551+ }
22552+#endif
22553
22554 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22555
22556@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22557 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22558 {
22559 /* change init_mm */
22560+ pax_open_kernel();
22561 set_pte_atomic(kpte, pte);
22562+
22563 #ifdef CONFIG_X86_32
22564 if (!SHARED_KERNEL_PMD) {
22565+
22566+#ifdef CONFIG_PAX_PER_CPU_PGD
22567+ unsigned long cpu;
22568+#else
22569 struct page *page;
22570+#endif
22571
22572+#ifdef CONFIG_PAX_PER_CPU_PGD
22573+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22574+ pgd_t *pgd = get_cpu_pgd(cpu);
22575+#else
22576 list_for_each_entry(page, &pgd_list, lru) {
22577- pgd_t *pgd;
22578+ pgd_t *pgd = (pgd_t *)page_address(page);
22579+#endif
22580+
22581 pud_t *pud;
22582 pmd_t *pmd;
22583
22584- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22585+ pgd += pgd_index(address);
22586 pud = pud_offset(pgd, address);
22587 pmd = pmd_offset(pud, address);
22588 set_pte_atomic((pte_t *)pmd, pte);
22589 }
22590 }
22591 #endif
22592+ pax_close_kernel();
22593 }
22594
22595 static int
22596diff -urNp linux-2.6.32.46/arch/x86/mm/pageattr-test.c linux-2.6.32.46/arch/x86/mm/pageattr-test.c
22597--- linux-2.6.32.46/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22598+++ linux-2.6.32.46/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22599@@ -36,7 +36,7 @@ enum {
22600
22601 static int pte_testbit(pte_t pte)
22602 {
22603- return pte_flags(pte) & _PAGE_UNUSED1;
22604+ return pte_flags(pte) & _PAGE_CPA_TEST;
22605 }
22606
22607 struct split_state {
22608diff -urNp linux-2.6.32.46/arch/x86/mm/pat.c linux-2.6.32.46/arch/x86/mm/pat.c
22609--- linux-2.6.32.46/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22610+++ linux-2.6.32.46/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22611@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22612
22613 conflict:
22614 printk(KERN_INFO "%s:%d conflicting memory types "
22615- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22616+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22617 new->end, cattr_name(new->type), cattr_name(entry->type));
22618 return -EBUSY;
22619 }
22620@@ -559,7 +559,7 @@ unlock_ret:
22621
22622 if (err) {
22623 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22624- current->comm, current->pid, start, end);
22625+ current->comm, task_pid_nr(current), start, end);
22626 }
22627
22628 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22629@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22630 while (cursor < to) {
22631 if (!devmem_is_allowed(pfn)) {
22632 printk(KERN_INFO
22633- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22634- current->comm, from, to);
22635+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22636+ current->comm, from, to, cursor);
22637 return 0;
22638 }
22639 cursor += PAGE_SIZE;
22640@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22641 printk(KERN_INFO
22642 "%s:%d ioremap_change_attr failed %s "
22643 "for %Lx-%Lx\n",
22644- current->comm, current->pid,
22645+ current->comm, task_pid_nr(current),
22646 cattr_name(flags),
22647 base, (unsigned long long)(base + size));
22648 return -EINVAL;
22649@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22650 free_memtype(paddr, paddr + size);
22651 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22652 " for %Lx-%Lx, got %s\n",
22653- current->comm, current->pid,
22654+ current->comm, task_pid_nr(current),
22655 cattr_name(want_flags),
22656 (unsigned long long)paddr,
22657 (unsigned long long)(paddr + size),
22658diff -urNp linux-2.6.32.46/arch/x86/mm/pf_in.c linux-2.6.32.46/arch/x86/mm/pf_in.c
22659--- linux-2.6.32.46/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22660+++ linux-2.6.32.46/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22661@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22662 int i;
22663 enum reason_type rv = OTHERS;
22664
22665- p = (unsigned char *)ins_addr;
22666+ p = (unsigned char *)ktla_ktva(ins_addr);
22667 p += skip_prefix(p, &prf);
22668 p += get_opcode(p, &opcode);
22669
22670@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22671 struct prefix_bits prf;
22672 int i;
22673
22674- p = (unsigned char *)ins_addr;
22675+ p = (unsigned char *)ktla_ktva(ins_addr);
22676 p += skip_prefix(p, &prf);
22677 p += get_opcode(p, &opcode);
22678
22679@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22680 struct prefix_bits prf;
22681 int i;
22682
22683- p = (unsigned char *)ins_addr;
22684+ p = (unsigned char *)ktla_ktva(ins_addr);
22685 p += skip_prefix(p, &prf);
22686 p += get_opcode(p, &opcode);
22687
22688@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22689 int i;
22690 unsigned long rv;
22691
22692- p = (unsigned char *)ins_addr;
22693+ p = (unsigned char *)ktla_ktva(ins_addr);
22694 p += skip_prefix(p, &prf);
22695 p += get_opcode(p, &opcode);
22696 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22697@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22698 int i;
22699 unsigned long rv;
22700
22701- p = (unsigned char *)ins_addr;
22702+ p = (unsigned char *)ktla_ktva(ins_addr);
22703 p += skip_prefix(p, &prf);
22704 p += get_opcode(p, &opcode);
22705 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22706diff -urNp linux-2.6.32.46/arch/x86/mm/pgtable_32.c linux-2.6.32.46/arch/x86/mm/pgtable_32.c
22707--- linux-2.6.32.46/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22708+++ linux-2.6.32.46/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22709@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22710 return;
22711 }
22712 pte = pte_offset_kernel(pmd, vaddr);
22713+
22714+ pax_open_kernel();
22715 if (pte_val(pteval))
22716 set_pte_at(&init_mm, vaddr, pte, pteval);
22717 else
22718 pte_clear(&init_mm, vaddr, pte);
22719+ pax_close_kernel();
22720
22721 /*
22722 * It's enough to flush this one mapping.
22723diff -urNp linux-2.6.32.46/arch/x86/mm/pgtable.c linux-2.6.32.46/arch/x86/mm/pgtable.c
22724--- linux-2.6.32.46/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22725+++ linux-2.6.32.46/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22726@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22727 list_del(&page->lru);
22728 }
22729
22730-#define UNSHARED_PTRS_PER_PGD \
22731- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22732+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22733+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22734
22735+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22736+{
22737+ while (count--)
22738+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22739+}
22740+#endif
22741+
22742+#ifdef CONFIG_PAX_PER_CPU_PGD
22743+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22744+{
22745+ while (count--)
22746+
22747+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22748+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22749+#else
22750+ *dst++ = *src++;
22751+#endif
22752+
22753+}
22754+#endif
22755+
22756+#ifdef CONFIG_X86_64
22757+#define pxd_t pud_t
22758+#define pyd_t pgd_t
22759+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22760+#define pxd_free(mm, pud) pud_free((mm), (pud))
22761+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22762+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22763+#define PYD_SIZE PGDIR_SIZE
22764+#else
22765+#define pxd_t pmd_t
22766+#define pyd_t pud_t
22767+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22768+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22769+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22770+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22771+#define PYD_SIZE PUD_SIZE
22772+#endif
22773+
22774+#ifdef CONFIG_PAX_PER_CPU_PGD
22775+static inline void pgd_ctor(pgd_t *pgd) {}
22776+static inline void pgd_dtor(pgd_t *pgd) {}
22777+#else
22778 static void pgd_ctor(pgd_t *pgd)
22779 {
22780 /* If the pgd points to a shared pagetable level (either the
22781@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22782 pgd_list_del(pgd);
22783 spin_unlock_irqrestore(&pgd_lock, flags);
22784 }
22785+#endif
22786
22787 /*
22788 * List of all pgd's needed for non-PAE so it can invalidate entries
22789@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22790 * -- wli
22791 */
22792
22793-#ifdef CONFIG_X86_PAE
22794+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22795 /*
22796 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22797 * updating the top-level pagetable entries to guarantee the
22798@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22799 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22800 * and initialize the kernel pmds here.
22801 */
22802-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22803+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22804
22805 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22806 {
22807@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22808 */
22809 flush_tlb_mm(mm);
22810 }
22811+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22812+#define PREALLOCATED_PXDS USER_PGD_PTRS
22813 #else /* !CONFIG_X86_PAE */
22814
22815 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22816-#define PREALLOCATED_PMDS 0
22817+#define PREALLOCATED_PXDS 0
22818
22819 #endif /* CONFIG_X86_PAE */
22820
22821-static void free_pmds(pmd_t *pmds[])
22822+static void free_pxds(pxd_t *pxds[])
22823 {
22824 int i;
22825
22826- for(i = 0; i < PREALLOCATED_PMDS; i++)
22827- if (pmds[i])
22828- free_page((unsigned long)pmds[i]);
22829+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22830+ if (pxds[i])
22831+ free_page((unsigned long)pxds[i]);
22832 }
22833
22834-static int preallocate_pmds(pmd_t *pmds[])
22835+static int preallocate_pxds(pxd_t *pxds[])
22836 {
22837 int i;
22838 bool failed = false;
22839
22840- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22841- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22842- if (pmd == NULL)
22843+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22844+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22845+ if (pxd == NULL)
22846 failed = true;
22847- pmds[i] = pmd;
22848+ pxds[i] = pxd;
22849 }
22850
22851 if (failed) {
22852- free_pmds(pmds);
22853+ free_pxds(pxds);
22854 return -ENOMEM;
22855 }
22856
22857@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22858 * preallocate which never got a corresponding vma will need to be
22859 * freed manually.
22860 */
22861-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22862+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22863 {
22864 int i;
22865
22866- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22867+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22868 pgd_t pgd = pgdp[i];
22869
22870 if (pgd_val(pgd) != 0) {
22871- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22872+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22873
22874- pgdp[i] = native_make_pgd(0);
22875+ set_pgd(pgdp + i, native_make_pgd(0));
22876
22877- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22878- pmd_free(mm, pmd);
22879+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22880+ pxd_free(mm, pxd);
22881 }
22882 }
22883 }
22884
22885-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22886+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22887 {
22888- pud_t *pud;
22889+ pyd_t *pyd;
22890 unsigned long addr;
22891 int i;
22892
22893- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22894+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22895 return;
22896
22897- pud = pud_offset(pgd, 0);
22898+#ifdef CONFIG_X86_64
22899+ pyd = pyd_offset(mm, 0L);
22900+#else
22901+ pyd = pyd_offset(pgd, 0L);
22902+#endif
22903
22904- for (addr = i = 0; i < PREALLOCATED_PMDS;
22905- i++, pud++, addr += PUD_SIZE) {
22906- pmd_t *pmd = pmds[i];
22907+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22908+ i++, pyd++, addr += PYD_SIZE) {
22909+ pxd_t *pxd = pxds[i];
22910
22911 if (i >= KERNEL_PGD_BOUNDARY)
22912- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22913- sizeof(pmd_t) * PTRS_PER_PMD);
22914+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22915+ sizeof(pxd_t) * PTRS_PER_PMD);
22916
22917- pud_populate(mm, pud, pmd);
22918+ pyd_populate(mm, pyd, pxd);
22919 }
22920 }
22921
22922 pgd_t *pgd_alloc(struct mm_struct *mm)
22923 {
22924 pgd_t *pgd;
22925- pmd_t *pmds[PREALLOCATED_PMDS];
22926+ pxd_t *pxds[PREALLOCATED_PXDS];
22927+
22928 unsigned long flags;
22929
22930 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22931@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22932
22933 mm->pgd = pgd;
22934
22935- if (preallocate_pmds(pmds) != 0)
22936+ if (preallocate_pxds(pxds) != 0)
22937 goto out_free_pgd;
22938
22939 if (paravirt_pgd_alloc(mm) != 0)
22940- goto out_free_pmds;
22941+ goto out_free_pxds;
22942
22943 /*
22944 * Make sure that pre-populating the pmds is atomic with
22945@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22946 spin_lock_irqsave(&pgd_lock, flags);
22947
22948 pgd_ctor(pgd);
22949- pgd_prepopulate_pmd(mm, pgd, pmds);
22950+ pgd_prepopulate_pxd(mm, pgd, pxds);
22951
22952 spin_unlock_irqrestore(&pgd_lock, flags);
22953
22954 return pgd;
22955
22956-out_free_pmds:
22957- free_pmds(pmds);
22958+out_free_pxds:
22959+ free_pxds(pxds);
22960 out_free_pgd:
22961 free_page((unsigned long)pgd);
22962 out:
22963@@ -287,7 +338,7 @@ out:
22964
22965 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22966 {
22967- pgd_mop_up_pmds(mm, pgd);
22968+ pgd_mop_up_pxds(mm, pgd);
22969 pgd_dtor(pgd);
22970 paravirt_pgd_free(mm, pgd);
22971 free_page((unsigned long)pgd);
22972diff -urNp linux-2.6.32.46/arch/x86/mm/setup_nx.c linux-2.6.32.46/arch/x86/mm/setup_nx.c
22973--- linux-2.6.32.46/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22974+++ linux-2.6.32.46/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22975@@ -4,11 +4,10 @@
22976
22977 #include <asm/pgtable.h>
22978
22979+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22980 int nx_enabled;
22981
22982-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22983-static int disable_nx __cpuinitdata;
22984-
22985+#ifndef CONFIG_PAX_PAGEEXEC
22986 /*
22987 * noexec = on|off
22988 *
22989@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22990 if (!str)
22991 return -EINVAL;
22992 if (!strncmp(str, "on", 2)) {
22993- __supported_pte_mask |= _PAGE_NX;
22994- disable_nx = 0;
22995+ nx_enabled = 1;
22996 } else if (!strncmp(str, "off", 3)) {
22997- disable_nx = 1;
22998- __supported_pte_mask &= ~_PAGE_NX;
22999+ nx_enabled = 0;
23000 }
23001 return 0;
23002 }
23003 early_param("noexec", noexec_setup);
23004 #endif
23005+#endif
23006
23007 #ifdef CONFIG_X86_PAE
23008 void __init set_nx(void)
23009 {
23010- unsigned int v[4], l, h;
23011+ if (!nx_enabled && cpu_has_nx) {
23012+ unsigned l, h;
23013
23014- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
23015- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
23016-
23017- if ((v[3] & (1 << 20)) && !disable_nx) {
23018- rdmsr(MSR_EFER, l, h);
23019- l |= EFER_NX;
23020- wrmsr(MSR_EFER, l, h);
23021- nx_enabled = 1;
23022- __supported_pte_mask |= _PAGE_NX;
23023- }
23024+ __supported_pte_mask &= ~_PAGE_NX;
23025+ rdmsr(MSR_EFER, l, h);
23026+ l &= ~EFER_NX;
23027+ wrmsr(MSR_EFER, l, h);
23028 }
23029 }
23030 #else
23031@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
23032 unsigned long efer;
23033
23034 rdmsrl(MSR_EFER, efer);
23035- if (!(efer & EFER_NX) || disable_nx)
23036+ if (!(efer & EFER_NX) || !nx_enabled)
23037 __supported_pte_mask &= ~_PAGE_NX;
23038 }
23039 #endif
23040diff -urNp linux-2.6.32.46/arch/x86/mm/tlb.c linux-2.6.32.46/arch/x86/mm/tlb.c
23041--- linux-2.6.32.46/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
23042+++ linux-2.6.32.46/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
23043@@ -61,7 +61,11 @@ void leave_mm(int cpu)
23044 BUG();
23045 cpumask_clear_cpu(cpu,
23046 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23047+
23048+#ifndef CONFIG_PAX_PER_CPU_PGD
23049 load_cr3(swapper_pg_dir);
23050+#endif
23051+
23052 }
23053 EXPORT_SYMBOL_GPL(leave_mm);
23054
23055diff -urNp linux-2.6.32.46/arch/x86/oprofile/backtrace.c linux-2.6.32.46/arch/x86/oprofile/backtrace.c
23056--- linux-2.6.32.46/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
23057+++ linux-2.6.32.46/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
23058@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
23059 struct frame_head bufhead[2];
23060
23061 /* Also check accessibility of one struct frame_head beyond */
23062- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
23063+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
23064 return NULL;
23065 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23066 return NULL;
23067@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23068 {
23069 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23070
23071- if (!user_mode_vm(regs)) {
23072+ if (!user_mode(regs)) {
23073 unsigned long stack = kernel_stack_pointer(regs);
23074 if (depth)
23075 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23076diff -urNp linux-2.6.32.46/arch/x86/oprofile/op_model_p4.c linux-2.6.32.46/arch/x86/oprofile/op_model_p4.c
23077--- linux-2.6.32.46/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
23078+++ linux-2.6.32.46/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
23079@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23080 #endif
23081 }
23082
23083-static int inline addr_increment(void)
23084+static inline int addr_increment(void)
23085 {
23086 #ifdef CONFIG_SMP
23087 return smp_num_siblings == 2 ? 2 : 1;
23088diff -urNp linux-2.6.32.46/arch/x86/pci/common.c linux-2.6.32.46/arch/x86/pci/common.c
23089--- linux-2.6.32.46/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
23090+++ linux-2.6.32.46/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
23091@@ -31,8 +31,8 @@ int noioapicreroute = 1;
23092 int pcibios_last_bus = -1;
23093 unsigned long pirq_table_addr;
23094 struct pci_bus *pci_root_bus;
23095-struct pci_raw_ops *raw_pci_ops;
23096-struct pci_raw_ops *raw_pci_ext_ops;
23097+const struct pci_raw_ops *raw_pci_ops;
23098+const struct pci_raw_ops *raw_pci_ext_ops;
23099
23100 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23101 int reg, int len, u32 *val)
23102diff -urNp linux-2.6.32.46/arch/x86/pci/direct.c linux-2.6.32.46/arch/x86/pci/direct.c
23103--- linux-2.6.32.46/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
23104+++ linux-2.6.32.46/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
23105@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23106
23107 #undef PCI_CONF1_ADDRESS
23108
23109-struct pci_raw_ops pci_direct_conf1 = {
23110+const struct pci_raw_ops pci_direct_conf1 = {
23111 .read = pci_conf1_read,
23112 .write = pci_conf1_write,
23113 };
23114@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23115
23116 #undef PCI_CONF2_ADDRESS
23117
23118-struct pci_raw_ops pci_direct_conf2 = {
23119+const struct pci_raw_ops pci_direct_conf2 = {
23120 .read = pci_conf2_read,
23121 .write = pci_conf2_write,
23122 };
23123@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23124 * This should be close to trivial, but it isn't, because there are buggy
23125 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23126 */
23127-static int __init pci_sanity_check(struct pci_raw_ops *o)
23128+static int __init pci_sanity_check(const struct pci_raw_ops *o)
23129 {
23130 u32 x = 0;
23131 int year, devfn;
23132diff -urNp linux-2.6.32.46/arch/x86/pci/mmconfig_32.c linux-2.6.32.46/arch/x86/pci/mmconfig_32.c
23133--- linux-2.6.32.46/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
23134+++ linux-2.6.32.46/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
23135@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23136 return 0;
23137 }
23138
23139-static struct pci_raw_ops pci_mmcfg = {
23140+static const struct pci_raw_ops pci_mmcfg = {
23141 .read = pci_mmcfg_read,
23142 .write = pci_mmcfg_write,
23143 };
23144diff -urNp linux-2.6.32.46/arch/x86/pci/mmconfig_64.c linux-2.6.32.46/arch/x86/pci/mmconfig_64.c
23145--- linux-2.6.32.46/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
23146+++ linux-2.6.32.46/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
23147@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23148 return 0;
23149 }
23150
23151-static struct pci_raw_ops pci_mmcfg = {
23152+static const struct pci_raw_ops pci_mmcfg = {
23153 .read = pci_mmcfg_read,
23154 .write = pci_mmcfg_write,
23155 };
23156diff -urNp linux-2.6.32.46/arch/x86/pci/numaq_32.c linux-2.6.32.46/arch/x86/pci/numaq_32.c
23157--- linux-2.6.32.46/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
23158+++ linux-2.6.32.46/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
23159@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23160
23161 #undef PCI_CONF1_MQ_ADDRESS
23162
23163-static struct pci_raw_ops pci_direct_conf1_mq = {
23164+static const struct pci_raw_ops pci_direct_conf1_mq = {
23165 .read = pci_conf1_mq_read,
23166 .write = pci_conf1_mq_write
23167 };
23168diff -urNp linux-2.6.32.46/arch/x86/pci/olpc.c linux-2.6.32.46/arch/x86/pci/olpc.c
23169--- linux-2.6.32.46/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
23170+++ linux-2.6.32.46/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
23171@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23172 return 0;
23173 }
23174
23175-static struct pci_raw_ops pci_olpc_conf = {
23176+static const struct pci_raw_ops pci_olpc_conf = {
23177 .read = pci_olpc_read,
23178 .write = pci_olpc_write,
23179 };
23180diff -urNp linux-2.6.32.46/arch/x86/pci/pcbios.c linux-2.6.32.46/arch/x86/pci/pcbios.c
23181--- linux-2.6.32.46/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23182+++ linux-2.6.32.46/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23183@@ -56,50 +56,93 @@ union bios32 {
23184 static struct {
23185 unsigned long address;
23186 unsigned short segment;
23187-} bios32_indirect = { 0, __KERNEL_CS };
23188+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23189
23190 /*
23191 * Returns the entry point for the given service, NULL on error
23192 */
23193
23194-static unsigned long bios32_service(unsigned long service)
23195+static unsigned long __devinit bios32_service(unsigned long service)
23196 {
23197 unsigned char return_code; /* %al */
23198 unsigned long address; /* %ebx */
23199 unsigned long length; /* %ecx */
23200 unsigned long entry; /* %edx */
23201 unsigned long flags;
23202+ struct desc_struct d, *gdt;
23203
23204 local_irq_save(flags);
23205- __asm__("lcall *(%%edi); cld"
23206+
23207+ gdt = get_cpu_gdt_table(smp_processor_id());
23208+
23209+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23210+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23211+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23212+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23213+
23214+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23215 : "=a" (return_code),
23216 "=b" (address),
23217 "=c" (length),
23218 "=d" (entry)
23219 : "0" (service),
23220 "1" (0),
23221- "D" (&bios32_indirect));
23222+ "D" (&bios32_indirect),
23223+ "r"(__PCIBIOS_DS)
23224+ : "memory");
23225+
23226+ pax_open_kernel();
23227+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23228+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23229+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23230+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23231+ pax_close_kernel();
23232+
23233 local_irq_restore(flags);
23234
23235 switch (return_code) {
23236- case 0:
23237- return address + entry;
23238- case 0x80: /* Not present */
23239- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23240- return 0;
23241- default: /* Shouldn't happen */
23242- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23243- service, return_code);
23244+ case 0: {
23245+ int cpu;
23246+ unsigned char flags;
23247+
23248+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23249+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23250+ printk(KERN_WARNING "bios32_service: not valid\n");
23251 return 0;
23252+ }
23253+ address = address + PAGE_OFFSET;
23254+ length += 16UL; /* some BIOSs underreport this... */
23255+ flags = 4;
23256+ if (length >= 64*1024*1024) {
23257+ length >>= PAGE_SHIFT;
23258+ flags |= 8;
23259+ }
23260+
23261+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23262+ gdt = get_cpu_gdt_table(cpu);
23263+ pack_descriptor(&d, address, length, 0x9b, flags);
23264+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23265+ pack_descriptor(&d, address, length, 0x93, flags);
23266+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23267+ }
23268+ return entry;
23269+ }
23270+ case 0x80: /* Not present */
23271+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23272+ return 0;
23273+ default: /* Shouldn't happen */
23274+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23275+ service, return_code);
23276+ return 0;
23277 }
23278 }
23279
23280 static struct {
23281 unsigned long address;
23282 unsigned short segment;
23283-} pci_indirect = { 0, __KERNEL_CS };
23284+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23285
23286-static int pci_bios_present;
23287+static int pci_bios_present __read_only;
23288
23289 static int __devinit check_pcibios(void)
23290 {
23291@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23292 unsigned long flags, pcibios_entry;
23293
23294 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23295- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23296+ pci_indirect.address = pcibios_entry;
23297
23298 local_irq_save(flags);
23299- __asm__(
23300- "lcall *(%%edi); cld\n\t"
23301+ __asm__("movw %w6, %%ds\n\t"
23302+ "lcall *%%ss:(%%edi); cld\n\t"
23303+ "push %%ss\n\t"
23304+ "pop %%ds\n\t"
23305 "jc 1f\n\t"
23306 "xor %%ah, %%ah\n"
23307 "1:"
23308@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23309 "=b" (ebx),
23310 "=c" (ecx)
23311 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23312- "D" (&pci_indirect)
23313+ "D" (&pci_indirect),
23314+ "r" (__PCIBIOS_DS)
23315 : "memory");
23316 local_irq_restore(flags);
23317
23318@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23319
23320 switch (len) {
23321 case 1:
23322- __asm__("lcall *(%%esi); cld\n\t"
23323+ __asm__("movw %w6, %%ds\n\t"
23324+ "lcall *%%ss:(%%esi); cld\n\t"
23325+ "push %%ss\n\t"
23326+ "pop %%ds\n\t"
23327 "jc 1f\n\t"
23328 "xor %%ah, %%ah\n"
23329 "1:"
23330@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23331 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23332 "b" (bx),
23333 "D" ((long)reg),
23334- "S" (&pci_indirect));
23335+ "S" (&pci_indirect),
23336+ "r" (__PCIBIOS_DS));
23337 /*
23338 * Zero-extend the result beyond 8 bits, do not trust the
23339 * BIOS having done it:
23340@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23341 *value &= 0xff;
23342 break;
23343 case 2:
23344- __asm__("lcall *(%%esi); cld\n\t"
23345+ __asm__("movw %w6, %%ds\n\t"
23346+ "lcall *%%ss:(%%esi); cld\n\t"
23347+ "push %%ss\n\t"
23348+ "pop %%ds\n\t"
23349 "jc 1f\n\t"
23350 "xor %%ah, %%ah\n"
23351 "1:"
23352@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23353 : "1" (PCIBIOS_READ_CONFIG_WORD),
23354 "b" (bx),
23355 "D" ((long)reg),
23356- "S" (&pci_indirect));
23357+ "S" (&pci_indirect),
23358+ "r" (__PCIBIOS_DS));
23359 /*
23360 * Zero-extend the result beyond 16 bits, do not trust the
23361 * BIOS having done it:
23362@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23363 *value &= 0xffff;
23364 break;
23365 case 4:
23366- __asm__("lcall *(%%esi); cld\n\t"
23367+ __asm__("movw %w6, %%ds\n\t"
23368+ "lcall *%%ss:(%%esi); cld\n\t"
23369+ "push %%ss\n\t"
23370+ "pop %%ds\n\t"
23371 "jc 1f\n\t"
23372 "xor %%ah, %%ah\n"
23373 "1:"
23374@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23375 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23376 "b" (bx),
23377 "D" ((long)reg),
23378- "S" (&pci_indirect));
23379+ "S" (&pci_indirect),
23380+ "r" (__PCIBIOS_DS));
23381 break;
23382 }
23383
23384@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23385
23386 switch (len) {
23387 case 1:
23388- __asm__("lcall *(%%esi); cld\n\t"
23389+ __asm__("movw %w6, %%ds\n\t"
23390+ "lcall *%%ss:(%%esi); cld\n\t"
23391+ "push %%ss\n\t"
23392+ "pop %%ds\n\t"
23393 "jc 1f\n\t"
23394 "xor %%ah, %%ah\n"
23395 "1:"
23396@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23397 "c" (value),
23398 "b" (bx),
23399 "D" ((long)reg),
23400- "S" (&pci_indirect));
23401+ "S" (&pci_indirect),
23402+ "r" (__PCIBIOS_DS));
23403 break;
23404 case 2:
23405- __asm__("lcall *(%%esi); cld\n\t"
23406+ __asm__("movw %w6, %%ds\n\t"
23407+ "lcall *%%ss:(%%esi); cld\n\t"
23408+ "push %%ss\n\t"
23409+ "pop %%ds\n\t"
23410 "jc 1f\n\t"
23411 "xor %%ah, %%ah\n"
23412 "1:"
23413@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23414 "c" (value),
23415 "b" (bx),
23416 "D" ((long)reg),
23417- "S" (&pci_indirect));
23418+ "S" (&pci_indirect),
23419+ "r" (__PCIBIOS_DS));
23420 break;
23421 case 4:
23422- __asm__("lcall *(%%esi); cld\n\t"
23423+ __asm__("movw %w6, %%ds\n\t"
23424+ "lcall *%%ss:(%%esi); cld\n\t"
23425+ "push %%ss\n\t"
23426+ "pop %%ds\n\t"
23427 "jc 1f\n\t"
23428 "xor %%ah, %%ah\n"
23429 "1:"
23430@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23431 "c" (value),
23432 "b" (bx),
23433 "D" ((long)reg),
23434- "S" (&pci_indirect));
23435+ "S" (&pci_indirect),
23436+ "r" (__PCIBIOS_DS));
23437 break;
23438 }
23439
23440@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23441 * Function table for BIOS32 access
23442 */
23443
23444-static struct pci_raw_ops pci_bios_access = {
23445+static const struct pci_raw_ops pci_bios_access = {
23446 .read = pci_bios_read,
23447 .write = pci_bios_write
23448 };
23449@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23450 * Try to find PCI BIOS.
23451 */
23452
23453-static struct pci_raw_ops * __devinit pci_find_bios(void)
23454+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23455 {
23456 union bios32 *check;
23457 unsigned char sum;
23458@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23459
23460 DBG("PCI: Fetching IRQ routing table... ");
23461 __asm__("push %%es\n\t"
23462+ "movw %w8, %%ds\n\t"
23463 "push %%ds\n\t"
23464 "pop %%es\n\t"
23465- "lcall *(%%esi); cld\n\t"
23466+ "lcall *%%ss:(%%esi); cld\n\t"
23467 "pop %%es\n\t"
23468+ "push %%ss\n\t"
23469+ "pop %%ds\n"
23470 "jc 1f\n\t"
23471 "xor %%ah, %%ah\n"
23472 "1:"
23473@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23474 "1" (0),
23475 "D" ((long) &opt),
23476 "S" (&pci_indirect),
23477- "m" (opt)
23478+ "m" (opt),
23479+ "r" (__PCIBIOS_DS)
23480 : "memory");
23481 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23482 if (ret & 0xff00)
23483@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23484 {
23485 int ret;
23486
23487- __asm__("lcall *(%%esi); cld\n\t"
23488+ __asm__("movw %w5, %%ds\n\t"
23489+ "lcall *%%ss:(%%esi); cld\n\t"
23490+ "push %%ss\n\t"
23491+ "pop %%ds\n"
23492 "jc 1f\n\t"
23493 "xor %%ah, %%ah\n"
23494 "1:"
23495@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23496 : "0" (PCIBIOS_SET_PCI_HW_INT),
23497 "b" ((dev->bus->number << 8) | dev->devfn),
23498 "c" ((irq << 8) | (pin + 10)),
23499- "S" (&pci_indirect));
23500+ "S" (&pci_indirect),
23501+ "r" (__PCIBIOS_DS));
23502 return !(ret & 0xff00);
23503 }
23504 EXPORT_SYMBOL(pcibios_set_irq_routing);
23505diff -urNp linux-2.6.32.46/arch/x86/power/cpu.c linux-2.6.32.46/arch/x86/power/cpu.c
23506--- linux-2.6.32.46/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23507+++ linux-2.6.32.46/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23508@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23509 static void fix_processor_context(void)
23510 {
23511 int cpu = smp_processor_id();
23512- struct tss_struct *t = &per_cpu(init_tss, cpu);
23513+ struct tss_struct *t = init_tss + cpu;
23514
23515 set_tss_desc(cpu, t); /*
23516 * This just modifies memory; should not be
23517@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23518 */
23519
23520 #ifdef CONFIG_X86_64
23521+ pax_open_kernel();
23522 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23523+ pax_close_kernel();
23524
23525 syscall_init(); /* This sets MSR_*STAR and related */
23526 #endif
23527diff -urNp linux-2.6.32.46/arch/x86/vdso/Makefile linux-2.6.32.46/arch/x86/vdso/Makefile
23528--- linux-2.6.32.46/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23529+++ linux-2.6.32.46/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23530@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23531 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23532 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23533
23534-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23535+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23536 GCOV_PROFILE := n
23537
23538 #
23539diff -urNp linux-2.6.32.46/arch/x86/vdso/vclock_gettime.c linux-2.6.32.46/arch/x86/vdso/vclock_gettime.c
23540--- linux-2.6.32.46/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23541+++ linux-2.6.32.46/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23542@@ -22,24 +22,48 @@
23543 #include <asm/hpet.h>
23544 #include <asm/unistd.h>
23545 #include <asm/io.h>
23546+#include <asm/fixmap.h>
23547 #include "vextern.h"
23548
23549 #define gtod vdso_vsyscall_gtod_data
23550
23551+notrace noinline long __vdso_fallback_time(long *t)
23552+{
23553+ long secs;
23554+ asm volatile("syscall"
23555+ : "=a" (secs)
23556+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23557+ return secs;
23558+}
23559+
23560 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23561 {
23562 long ret;
23563 asm("syscall" : "=a" (ret) :
23564- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23565+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23566 return ret;
23567 }
23568
23569+notrace static inline cycle_t __vdso_vread_hpet(void)
23570+{
23571+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23572+}
23573+
23574+notrace static inline cycle_t __vdso_vread_tsc(void)
23575+{
23576+ cycle_t ret = (cycle_t)vget_cycles();
23577+
23578+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23579+}
23580+
23581 notrace static inline long vgetns(void)
23582 {
23583 long v;
23584- cycles_t (*vread)(void);
23585- vread = gtod->clock.vread;
23586- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23587+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23588+ v = __vdso_vread_tsc();
23589+ else
23590+ v = __vdso_vread_hpet();
23591+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23592 return (v * gtod->clock.mult) >> gtod->clock.shift;
23593 }
23594
23595@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23596
23597 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23598 {
23599- if (likely(gtod->sysctl_enabled))
23600+ if (likely(gtod->sysctl_enabled &&
23601+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23602+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23603 switch (clock) {
23604 case CLOCK_REALTIME:
23605 if (likely(gtod->clock.vread))
23606@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23607 int clock_gettime(clockid_t, struct timespec *)
23608 __attribute__((weak, alias("__vdso_clock_gettime")));
23609
23610-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23611+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23612 {
23613 long ret;
23614- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23615+ asm("syscall" : "=a" (ret) :
23616+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23617+ return ret;
23618+}
23619+
23620+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23621+{
23622+ if (likely(gtod->sysctl_enabled &&
23623+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23624+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23625+ {
23626 if (likely(tv != NULL)) {
23627 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23628 offsetof(struct timespec, tv_nsec) ||
23629@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23630 }
23631 return 0;
23632 }
23633- asm("syscall" : "=a" (ret) :
23634- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23635- return ret;
23636+ return __vdso_fallback_gettimeofday(tv, tz);
23637 }
23638 int gettimeofday(struct timeval *, struct timezone *)
23639 __attribute__((weak, alias("__vdso_gettimeofday")));
23640diff -urNp linux-2.6.32.46/arch/x86/vdso/vdso32-setup.c linux-2.6.32.46/arch/x86/vdso/vdso32-setup.c
23641--- linux-2.6.32.46/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23642+++ linux-2.6.32.46/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23643@@ -25,6 +25,7 @@
23644 #include <asm/tlbflush.h>
23645 #include <asm/vdso.h>
23646 #include <asm/proto.h>
23647+#include <asm/mman.h>
23648
23649 enum {
23650 VDSO_DISABLED = 0,
23651@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23652 void enable_sep_cpu(void)
23653 {
23654 int cpu = get_cpu();
23655- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23656+ struct tss_struct *tss = init_tss + cpu;
23657
23658 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23659 put_cpu();
23660@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23661 gate_vma.vm_start = FIXADDR_USER_START;
23662 gate_vma.vm_end = FIXADDR_USER_END;
23663 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23664- gate_vma.vm_page_prot = __P101;
23665+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23666 /*
23667 * Make sure the vDSO gets into every core dump.
23668 * Dumping its contents makes post-mortem fully interpretable later
23669@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23670 if (compat)
23671 addr = VDSO_HIGH_BASE;
23672 else {
23673- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23674+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23675 if (IS_ERR_VALUE(addr)) {
23676 ret = addr;
23677 goto up_fail;
23678 }
23679 }
23680
23681- current->mm->context.vdso = (void *)addr;
23682+ current->mm->context.vdso = addr;
23683
23684 if (compat_uses_vma || !compat) {
23685 /*
23686@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23687 }
23688
23689 current_thread_info()->sysenter_return =
23690- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23691+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23692
23693 up_fail:
23694 if (ret)
23695- current->mm->context.vdso = NULL;
23696+ current->mm->context.vdso = 0;
23697
23698 up_write(&mm->mmap_sem);
23699
23700@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23701
23702 const char *arch_vma_name(struct vm_area_struct *vma)
23703 {
23704- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23705+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23706 return "[vdso]";
23707+
23708+#ifdef CONFIG_PAX_SEGMEXEC
23709+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23710+ return "[vdso]";
23711+#endif
23712+
23713 return NULL;
23714 }
23715
23716@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23717 struct mm_struct *mm = tsk->mm;
23718
23719 /* Check to see if this task was created in compat vdso mode */
23720- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23721+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23722 return &gate_vma;
23723 return NULL;
23724 }
23725diff -urNp linux-2.6.32.46/arch/x86/vdso/vdso.lds.S linux-2.6.32.46/arch/x86/vdso/vdso.lds.S
23726--- linux-2.6.32.46/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23727+++ linux-2.6.32.46/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23728@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23729 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23730 #include "vextern.h"
23731 #undef VEXTERN
23732+
23733+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23734+VEXTERN(fallback_gettimeofday)
23735+VEXTERN(fallback_time)
23736+VEXTERN(getcpu)
23737+#undef VEXTERN
23738diff -urNp linux-2.6.32.46/arch/x86/vdso/vextern.h linux-2.6.32.46/arch/x86/vdso/vextern.h
23739--- linux-2.6.32.46/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23740+++ linux-2.6.32.46/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23741@@ -11,6 +11,5 @@
23742 put into vextern.h and be referenced as a pointer with vdso prefix.
23743 The main kernel later fills in the values. */
23744
23745-VEXTERN(jiffies)
23746 VEXTERN(vgetcpu_mode)
23747 VEXTERN(vsyscall_gtod_data)
23748diff -urNp linux-2.6.32.46/arch/x86/vdso/vma.c linux-2.6.32.46/arch/x86/vdso/vma.c
23749--- linux-2.6.32.46/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23750+++ linux-2.6.32.46/arch/x86/vdso/vma.c 2011-08-23 20:24:19.000000000 -0400
23751@@ -17,8 +17,6 @@
23752 #include "vextern.h" /* Just for VMAGIC. */
23753 #undef VEXTERN
23754
23755-unsigned int __read_mostly vdso_enabled = 1;
23756-
23757 extern char vdso_start[], vdso_end[];
23758 extern unsigned short vdso_sync_cpuid;
23759
23760@@ -27,10 +25,8 @@ static unsigned vdso_size;
23761
23762 static inline void *var_ref(void *p, char *name)
23763 {
23764- if (*(void **)p != (void *)VMAGIC) {
23765- printk("VDSO: variable %s broken\n", name);
23766- vdso_enabled = 0;
23767- }
23768+ if (*(void **)p != (void *)VMAGIC)
23769+ panic("VDSO: variable %s broken\n", name);
23770 return p;
23771 }
23772
23773@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
23774 if (!vbase)
23775 goto oom;
23776
23777- if (memcmp(vbase, "\177ELF", 4)) {
23778- printk("VDSO: I'm broken; not ELF\n");
23779- vdso_enabled = 0;
23780- }
23781+ if (memcmp(vbase, ELFMAG, SELFMAG))
23782+ panic("VDSO: I'm broken; not ELF\n");
23783
23784 #define VEXTERN(x) \
23785 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23786 #include "vextern.h"
23787 #undef VEXTERN
23788+ vunmap(vbase);
23789 return 0;
23790
23791 oom:
23792- printk("Cannot allocate vdso\n");
23793- vdso_enabled = 0;
23794- return -ENOMEM;
23795+ panic("Cannot allocate vdso\n");
23796 }
23797 __initcall(init_vdso_vars);
23798
23799@@ -105,9 +98,6 @@ int arch_setup_additional_pages(struct l
23800 unsigned long addr;
23801 int ret;
23802
23803- if (!vdso_enabled)
23804- return 0;
23805-
23806 down_write(&mm->mmap_sem);
23807 addr = vdso_addr(mm->start_stack, vdso_size);
23808 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
23809@@ -116,7 +106,7 @@ int arch_setup_additional_pages(struct l
23810 goto up_fail;
23811 }
23812
23813- current->mm->context.vdso = (void *)addr;
23814+ current->mm->context.vdso = addr;
23815
23816 ret = install_special_mapping(mm, addr, vdso_size,
23817 VM_READ|VM_EXEC|
23818@@ -124,7 +114,7 @@ int arch_setup_additional_pages(struct l
23819 VM_ALWAYSDUMP,
23820 vdso_pages);
23821 if (ret) {
23822- current->mm->context.vdso = NULL;
23823+ current->mm->context.vdso = 0;
23824 goto up_fail;
23825 }
23826
23827@@ -132,10 +122,3 @@ up_fail:
23828 up_write(&mm->mmap_sem);
23829 return ret;
23830 }
23831-
23832-static __init int vdso_setup(char *s)
23833-{
23834- vdso_enabled = simple_strtoul(s, NULL, 0);
23835- return 0;
23836-}
23837-__setup("vdso=", vdso_setup);
23838diff -urNp linux-2.6.32.46/arch/x86/xen/enlighten.c linux-2.6.32.46/arch/x86/xen/enlighten.c
23839--- linux-2.6.32.46/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23840+++ linux-2.6.32.46/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23841@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23842
23843 struct shared_info xen_dummy_shared_info;
23844
23845-void *xen_initial_gdt;
23846-
23847 /*
23848 * Point at some empty memory to start with. We map the real shared_info
23849 * page as soon as fixmap is up and running.
23850@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23851
23852 preempt_disable();
23853
23854- start = __get_cpu_var(idt_desc).address;
23855+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23856 end = start + __get_cpu_var(idt_desc).size + 1;
23857
23858 xen_mc_flush();
23859@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23860 #endif
23861 };
23862
23863-static void xen_reboot(int reason)
23864+static __noreturn void xen_reboot(int reason)
23865 {
23866 struct sched_shutdown r = { .reason = reason };
23867
23868@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23869 BUG();
23870 }
23871
23872-static void xen_restart(char *msg)
23873+static __noreturn void xen_restart(char *msg)
23874 {
23875 xen_reboot(SHUTDOWN_reboot);
23876 }
23877
23878-static void xen_emergency_restart(void)
23879+static __noreturn void xen_emergency_restart(void)
23880 {
23881 xen_reboot(SHUTDOWN_reboot);
23882 }
23883
23884-static void xen_machine_halt(void)
23885+static __noreturn void xen_machine_halt(void)
23886 {
23887 xen_reboot(SHUTDOWN_poweroff);
23888 }
23889@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23890 */
23891 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23892
23893-#ifdef CONFIG_X86_64
23894 /* Work out if we support NX */
23895- check_efer();
23896+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23897+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23898+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23899+ unsigned l, h;
23900+
23901+#ifdef CONFIG_X86_PAE
23902+ nx_enabled = 1;
23903+#endif
23904+ __supported_pte_mask |= _PAGE_NX;
23905+ rdmsr(MSR_EFER, l, h);
23906+ l |= EFER_NX;
23907+ wrmsr(MSR_EFER, l, h);
23908+ }
23909 #endif
23910
23911 xen_setup_features();
23912@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23913
23914 machine_ops = xen_machine_ops;
23915
23916- /*
23917- * The only reliable way to retain the initial address of the
23918- * percpu gdt_page is to remember it here, so we can go and
23919- * mark it RW later, when the initial percpu area is freed.
23920- */
23921- xen_initial_gdt = &per_cpu(gdt_page, 0);
23922-
23923 xen_smp_init();
23924
23925 pgd = (pgd_t *)xen_start_info->pt_base;
23926diff -urNp linux-2.6.32.46/arch/x86/xen/mmu.c linux-2.6.32.46/arch/x86/xen/mmu.c
23927--- linux-2.6.32.46/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23928+++ linux-2.6.32.46/arch/x86/xen/mmu.c 2011-08-24 18:35:52.000000000 -0400
23929@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23930 convert_pfn_mfn(init_level4_pgt);
23931 convert_pfn_mfn(level3_ident_pgt);
23932 convert_pfn_mfn(level3_kernel_pgt);
23933+ convert_pfn_mfn(level3_vmalloc_pgt);
23934+ convert_pfn_mfn(level3_vmemmap_pgt);
23935
23936 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23937 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23938@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23939 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23940 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23941 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23942+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23943+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23944 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23945+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23946 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23947 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23948
23949@@ -1860,6 +1865,7 @@ static __init void xen_post_allocator_in
23950 pv_mmu_ops.set_pud = xen_set_pud;
23951 #if PAGETABLE_LEVELS == 4
23952 pv_mmu_ops.set_pgd = xen_set_pgd;
23953+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
23954 #endif
23955
23956 /* This will work as long as patching hasn't happened yet
23957@@ -1946,6 +1952,7 @@ static const struct pv_mmu_ops xen_mmu_o
23958 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
23959 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
23960 .set_pgd = xen_set_pgd_hyper,
23961+ .set_pgd_batched = xen_set_pgd_hyper,
23962
23963 .alloc_pud = xen_alloc_pmd_init,
23964 .release_pud = xen_release_pmd_init,
23965diff -urNp linux-2.6.32.46/arch/x86/xen/smp.c linux-2.6.32.46/arch/x86/xen/smp.c
23966--- linux-2.6.32.46/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23967+++ linux-2.6.32.46/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23968@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23969 {
23970 BUG_ON(smp_processor_id() != 0);
23971 native_smp_prepare_boot_cpu();
23972-
23973- /* We've switched to the "real" per-cpu gdt, so make sure the
23974- old memory can be recycled */
23975- make_lowmem_page_readwrite(xen_initial_gdt);
23976-
23977 xen_setup_vcpu_info_placement();
23978 }
23979
23980@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23981 gdt = get_cpu_gdt_table(cpu);
23982
23983 ctxt->flags = VGCF_IN_KERNEL;
23984- ctxt->user_regs.ds = __USER_DS;
23985- ctxt->user_regs.es = __USER_DS;
23986+ ctxt->user_regs.ds = __KERNEL_DS;
23987+ ctxt->user_regs.es = __KERNEL_DS;
23988 ctxt->user_regs.ss = __KERNEL_DS;
23989 #ifdef CONFIG_X86_32
23990 ctxt->user_regs.fs = __KERNEL_PERCPU;
23991- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23992+ savesegment(gs, ctxt->user_regs.gs);
23993 #else
23994 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23995 #endif
23996@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23997 int rc;
23998
23999 per_cpu(current_task, cpu) = idle;
24000+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24001 #ifdef CONFIG_X86_32
24002 irq_ctx_init(cpu);
24003 #else
24004 clear_tsk_thread_flag(idle, TIF_FORK);
24005- per_cpu(kernel_stack, cpu) =
24006- (unsigned long)task_stack_page(idle) -
24007- KERNEL_STACK_OFFSET + THREAD_SIZE;
24008+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24009 #endif
24010 xen_setup_runstate_info(cpu);
24011 xen_setup_timer(cpu);
24012diff -urNp linux-2.6.32.46/arch/x86/xen/xen-asm_32.S linux-2.6.32.46/arch/x86/xen/xen-asm_32.S
24013--- linux-2.6.32.46/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
24014+++ linux-2.6.32.46/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
24015@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24016 ESP_OFFSET=4 # bytes pushed onto stack
24017
24018 /*
24019- * Store vcpu_info pointer for easy access. Do it this way to
24020- * avoid having to reload %fs
24021+ * Store vcpu_info pointer for easy access.
24022 */
24023 #ifdef CONFIG_SMP
24024- GET_THREAD_INFO(%eax)
24025- movl TI_cpu(%eax), %eax
24026- movl __per_cpu_offset(,%eax,4), %eax
24027- mov per_cpu__xen_vcpu(%eax), %eax
24028+ push %fs
24029+ mov $(__KERNEL_PERCPU), %eax
24030+ mov %eax, %fs
24031+ mov PER_CPU_VAR(xen_vcpu), %eax
24032+ pop %fs
24033 #else
24034 movl per_cpu__xen_vcpu, %eax
24035 #endif
24036diff -urNp linux-2.6.32.46/arch/x86/xen/xen-head.S linux-2.6.32.46/arch/x86/xen/xen-head.S
24037--- linux-2.6.32.46/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
24038+++ linux-2.6.32.46/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
24039@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24040 #ifdef CONFIG_X86_32
24041 mov %esi,xen_start_info
24042 mov $init_thread_union+THREAD_SIZE,%esp
24043+#ifdef CONFIG_SMP
24044+ movl $cpu_gdt_table,%edi
24045+ movl $__per_cpu_load,%eax
24046+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24047+ rorl $16,%eax
24048+ movb %al,__KERNEL_PERCPU + 4(%edi)
24049+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24050+ movl $__per_cpu_end - 1,%eax
24051+ subl $__per_cpu_start,%eax
24052+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24053+#endif
24054 #else
24055 mov %rsi,xen_start_info
24056 mov $init_thread_union+THREAD_SIZE,%rsp
24057diff -urNp linux-2.6.32.46/arch/x86/xen/xen-ops.h linux-2.6.32.46/arch/x86/xen/xen-ops.h
24058--- linux-2.6.32.46/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
24059+++ linux-2.6.32.46/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
24060@@ -10,8 +10,6 @@
24061 extern const char xen_hypervisor_callback[];
24062 extern const char xen_failsafe_callback[];
24063
24064-extern void *xen_initial_gdt;
24065-
24066 struct trap_info;
24067 void xen_copy_trap_info(struct trap_info *traps);
24068
24069diff -urNp linux-2.6.32.46/block/blk-integrity.c linux-2.6.32.46/block/blk-integrity.c
24070--- linux-2.6.32.46/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
24071+++ linux-2.6.32.46/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
24072@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
24073 NULL,
24074 };
24075
24076-static struct sysfs_ops integrity_ops = {
24077+static const struct sysfs_ops integrity_ops = {
24078 .show = &integrity_attr_show,
24079 .store = &integrity_attr_store,
24080 };
24081diff -urNp linux-2.6.32.46/block/blk-iopoll.c linux-2.6.32.46/block/blk-iopoll.c
24082--- linux-2.6.32.46/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
24083+++ linux-2.6.32.46/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
24084@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
24085 }
24086 EXPORT_SYMBOL(blk_iopoll_complete);
24087
24088-static void blk_iopoll_softirq(struct softirq_action *h)
24089+static void blk_iopoll_softirq(void)
24090 {
24091 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24092 int rearm = 0, budget = blk_iopoll_budget;
24093diff -urNp linux-2.6.32.46/block/blk-map.c linux-2.6.32.46/block/blk-map.c
24094--- linux-2.6.32.46/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
24095+++ linux-2.6.32.46/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
24096@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
24097 * direct dma. else, set up kernel bounce buffers
24098 */
24099 uaddr = (unsigned long) ubuf;
24100- if (blk_rq_aligned(q, ubuf, len) && !map_data)
24101+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
24102 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
24103 else
24104 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
24105@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
24106 for (i = 0; i < iov_count; i++) {
24107 unsigned long uaddr = (unsigned long)iov[i].iov_base;
24108
24109+ if (!iov[i].iov_len)
24110+ return -EINVAL;
24111+
24112 if (uaddr & queue_dma_alignment(q)) {
24113 unaligned = 1;
24114 break;
24115 }
24116- if (!iov[i].iov_len)
24117- return -EINVAL;
24118 }
24119
24120 if (unaligned || (q->dma_pad_mask & len) || map_data)
24121@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24122 if (!len || !kbuf)
24123 return -EINVAL;
24124
24125- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24126+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24127 if (do_copy)
24128 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24129 else
24130diff -urNp linux-2.6.32.46/block/blk-softirq.c linux-2.6.32.46/block/blk-softirq.c
24131--- linux-2.6.32.46/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
24132+++ linux-2.6.32.46/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
24133@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24134 * Softirq action handler - move entries to local list and loop over them
24135 * while passing them to the queue registered handler.
24136 */
24137-static void blk_done_softirq(struct softirq_action *h)
24138+static void blk_done_softirq(void)
24139 {
24140 struct list_head *cpu_list, local_list;
24141
24142diff -urNp linux-2.6.32.46/block/blk-sysfs.c linux-2.6.32.46/block/blk-sysfs.c
24143--- linux-2.6.32.46/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
24144+++ linux-2.6.32.46/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
24145@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24146 kmem_cache_free(blk_requestq_cachep, q);
24147 }
24148
24149-static struct sysfs_ops queue_sysfs_ops = {
24150+static const struct sysfs_ops queue_sysfs_ops = {
24151 .show = queue_attr_show,
24152 .store = queue_attr_store,
24153 };
24154diff -urNp linux-2.6.32.46/block/bsg.c linux-2.6.32.46/block/bsg.c
24155--- linux-2.6.32.46/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
24156+++ linux-2.6.32.46/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
24157@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24158 struct sg_io_v4 *hdr, struct bsg_device *bd,
24159 fmode_t has_write_perm)
24160 {
24161+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24162+ unsigned char *cmdptr;
24163+
24164 if (hdr->request_len > BLK_MAX_CDB) {
24165 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24166 if (!rq->cmd)
24167 return -ENOMEM;
24168- }
24169+ cmdptr = rq->cmd;
24170+ } else
24171+ cmdptr = tmpcmd;
24172
24173- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24174+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
24175 hdr->request_len))
24176 return -EFAULT;
24177
24178+ if (cmdptr != rq->cmd)
24179+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24180+
24181 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24182 if (blk_verify_command(rq->cmd, has_write_perm))
24183 return -EPERM;
24184diff -urNp linux-2.6.32.46/block/elevator.c linux-2.6.32.46/block/elevator.c
24185--- linux-2.6.32.46/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
24186+++ linux-2.6.32.46/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
24187@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
24188 return error;
24189 }
24190
24191-static struct sysfs_ops elv_sysfs_ops = {
24192+static const struct sysfs_ops elv_sysfs_ops = {
24193 .show = elv_attr_show,
24194 .store = elv_attr_store,
24195 };
24196diff -urNp linux-2.6.32.46/block/scsi_ioctl.c linux-2.6.32.46/block/scsi_ioctl.c
24197--- linux-2.6.32.46/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
24198+++ linux-2.6.32.46/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
24199@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
24200 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24201 struct sg_io_hdr *hdr, fmode_t mode)
24202 {
24203- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24204+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24205+ unsigned char *cmdptr;
24206+
24207+ if (rq->cmd != rq->__cmd)
24208+ cmdptr = rq->cmd;
24209+ else
24210+ cmdptr = tmpcmd;
24211+
24212+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24213 return -EFAULT;
24214+
24215+ if (cmdptr != rq->cmd)
24216+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24217+
24218 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24219 return -EPERM;
24220
24221@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
24222 int err;
24223 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24224 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24225+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24226+ unsigned char *cmdptr;
24227
24228 if (!sic)
24229 return -EINVAL;
24230@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
24231 */
24232 err = -EFAULT;
24233 rq->cmd_len = cmdlen;
24234- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24235+
24236+ if (rq->cmd != rq->__cmd)
24237+ cmdptr = rq->cmd;
24238+ else
24239+ cmdptr = tmpcmd;
24240+
24241+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24242 goto error;
24243
24244+ if (rq->cmd != cmdptr)
24245+ memcpy(rq->cmd, cmdptr, cmdlen);
24246+
24247 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24248 goto error;
24249
24250diff -urNp linux-2.6.32.46/crypto/cryptd.c linux-2.6.32.46/crypto/cryptd.c
24251--- linux-2.6.32.46/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24252+++ linux-2.6.32.46/crypto/cryptd.c 2011-08-23 21:22:32.000000000 -0400
24253@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
24254
24255 struct cryptd_blkcipher_request_ctx {
24256 crypto_completion_t complete;
24257-};
24258+} __no_const;
24259
24260 struct cryptd_hash_ctx {
24261 struct crypto_shash *child;
24262diff -urNp linux-2.6.32.46/crypto/gf128mul.c linux-2.6.32.46/crypto/gf128mul.c
24263--- linux-2.6.32.46/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24264+++ linux-2.6.32.46/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24265@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24266 for (i = 0; i < 7; ++i)
24267 gf128mul_x_lle(&p[i + 1], &p[i]);
24268
24269- memset(r, 0, sizeof(r));
24270+ memset(r, 0, sizeof(*r));
24271 for (i = 0;;) {
24272 u8 ch = ((u8 *)b)[15 - i];
24273
24274@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24275 for (i = 0; i < 7; ++i)
24276 gf128mul_x_bbe(&p[i + 1], &p[i]);
24277
24278- memset(r, 0, sizeof(r));
24279+ memset(r, 0, sizeof(*r));
24280 for (i = 0;;) {
24281 u8 ch = ((u8 *)b)[i];
24282
24283diff -urNp linux-2.6.32.46/crypto/serpent.c linux-2.6.32.46/crypto/serpent.c
24284--- linux-2.6.32.46/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24285+++ linux-2.6.32.46/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24286@@ -21,6 +21,7 @@
24287 #include <asm/byteorder.h>
24288 #include <linux/crypto.h>
24289 #include <linux/types.h>
24290+#include <linux/sched.h>
24291
24292 /* Key is padded to the maximum of 256 bits before round key generation.
24293 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24294@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24295 u32 r0,r1,r2,r3,r4;
24296 int i;
24297
24298+ pax_track_stack();
24299+
24300 /* Copy key, add padding */
24301
24302 for (i = 0; i < keylen; ++i)
24303diff -urNp linux-2.6.32.46/Documentation/dontdiff linux-2.6.32.46/Documentation/dontdiff
24304--- linux-2.6.32.46/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24305+++ linux-2.6.32.46/Documentation/dontdiff 2011-08-21 18:59:02.000000000 -0400
24306@@ -1,13 +1,16 @@
24307 *.a
24308 *.aux
24309 *.bin
24310+*.cis
24311 *.cpio
24312 *.csp
24313+*.dbg
24314 *.dsp
24315 *.dvi
24316 *.elf
24317 *.eps
24318 *.fw
24319+*.gcno
24320 *.gen.S
24321 *.gif
24322 *.grep
24323@@ -38,8 +41,10 @@
24324 *.tab.h
24325 *.tex
24326 *.ver
24327+*.vim
24328 *.xml
24329 *_MODULES
24330+*_reg_safe.h
24331 *_vga16.c
24332 *~
24333 *.9
24334@@ -49,11 +54,16 @@
24335 53c700_d.h
24336 CVS
24337 ChangeSet
24338+GPATH
24339+GRTAGS
24340+GSYMS
24341+GTAGS
24342 Image
24343 Kerntypes
24344 Module.markers
24345 Module.symvers
24346 PENDING
24347+PERF*
24348 SCCS
24349 System.map*
24350 TAGS
24351@@ -76,7 +86,11 @@ btfixupprep
24352 build
24353 bvmlinux
24354 bzImage*
24355+capability_names.h
24356+capflags.c
24357 classlist.h*
24358+clut_vga16.c
24359+common-cmds.h
24360 comp*.log
24361 compile.h*
24362 conf
24363@@ -97,19 +111,21 @@ elfconfig.h*
24364 fixdep
24365 fore200e_mkfirm
24366 fore200e_pca_fw.c*
24367+gate.lds
24368 gconf
24369 gen-devlist
24370 gen_crc32table
24371 gen_init_cpio
24372 genksyms
24373 *_gray256.c
24374+hash
24375 ihex2fw
24376 ikconfig.h*
24377 initramfs_data.cpio
24378+initramfs_data.cpio.bz2
24379 initramfs_data.cpio.gz
24380 initramfs_list
24381 kallsyms
24382-kconfig
24383 keywords.c
24384 ksym.c*
24385 ksym.h*
24386@@ -133,7 +149,9 @@ mkboot
24387 mkbugboot
24388 mkcpustr
24389 mkdep
24390+mkpiggy
24391 mkprep
24392+mkregtable
24393 mktables
24394 mktree
24395 modpost
24396@@ -149,6 +167,7 @@ patches*
24397 pca200e.bin
24398 pca200e_ecd.bin2
24399 piggy.gz
24400+piggy.S
24401 piggyback
24402 pnmtologo
24403 ppc_defs.h*
24404@@ -157,12 +176,15 @@ qconf
24405 raid6altivec*.c
24406 raid6int*.c
24407 raid6tables.c
24408+regdb.c
24409 relocs
24410+rlim_names.h
24411 series
24412 setup
24413 setup.bin
24414 setup.elf
24415 sImage
24416+slabinfo
24417 sm_tbl*
24418 split-include
24419 syscalltab.h
24420@@ -186,14 +208,20 @@ version.h*
24421 vmlinux
24422 vmlinux-*
24423 vmlinux.aout
24424+vmlinux.bin.all
24425+vmlinux.bin.bz2
24426 vmlinux.lds
24427+vmlinux.relocs
24428+voffset.h
24429 vsyscall.lds
24430 vsyscall_32.lds
24431 wanxlfw.inc
24432 uImage
24433 unifdef
24434+utsrelease.h
24435 wakeup.bin
24436 wakeup.elf
24437 wakeup.lds
24438 zImage*
24439 zconf.hash.c
24440+zoffset.h
24441diff -urNp linux-2.6.32.46/Documentation/kernel-parameters.txt linux-2.6.32.46/Documentation/kernel-parameters.txt
24442--- linux-2.6.32.46/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24443+++ linux-2.6.32.46/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24444@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24445 the specified number of seconds. This is to be used if
24446 your oopses keep scrolling off the screen.
24447
24448+	pax_nouderef	[X86] disables UDEREF.  Most likely needed in certain
24449+			virtualization environments that don't cope well with the
24450+			expand-down segment used by UDEREF on X86-32 or the frequent
24451+			page table updates on X86-64.
24452+
24453+	pax_softmode=	0/1 to disable/enable PaX softmode at boot time.
24454+
24455 pcbit= [HW,ISDN]
24456
24457 pcd. [PARIDE]
24458diff -urNp linux-2.6.32.46/drivers/acpi/acpi_pad.c linux-2.6.32.46/drivers/acpi/acpi_pad.c
24459--- linux-2.6.32.46/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24460+++ linux-2.6.32.46/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24461@@ -30,7 +30,7 @@
24462 #include <acpi/acpi_bus.h>
24463 #include <acpi/acpi_drivers.h>
24464
24465-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24466+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24467 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24468 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24469 static DEFINE_MUTEX(isolated_cpus_lock);
24470diff -urNp linux-2.6.32.46/drivers/acpi/battery.c linux-2.6.32.46/drivers/acpi/battery.c
24471--- linux-2.6.32.46/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24472+++ linux-2.6.32.46/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24473@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24474 }
24475
24476 static struct battery_file {
24477- struct file_operations ops;
24478+ const struct file_operations ops;
24479 mode_t mode;
24480 const char *name;
24481 } acpi_battery_file[] = {
24482diff -urNp linux-2.6.32.46/drivers/acpi/dock.c linux-2.6.32.46/drivers/acpi/dock.c
24483--- linux-2.6.32.46/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24484+++ linux-2.6.32.46/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24485@@ -77,7 +77,7 @@ struct dock_dependent_device {
24486 struct list_head list;
24487 struct list_head hotplug_list;
24488 acpi_handle handle;
24489- struct acpi_dock_ops *ops;
24490+ const struct acpi_dock_ops *ops;
24491 void *context;
24492 };
24493
24494@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24495 * the dock driver after _DCK is executed.
24496 */
24497 int
24498-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24499+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24500 void *context)
24501 {
24502 struct dock_dependent_device *dd;
24503diff -urNp linux-2.6.32.46/drivers/acpi/osl.c linux-2.6.32.46/drivers/acpi/osl.c
24504--- linux-2.6.32.46/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24505+++ linux-2.6.32.46/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24506@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24507 void __iomem *virt_addr;
24508
24509 virt_addr = ioremap(phys_addr, width);
24510+ if (!virt_addr)
24511+ return AE_NO_MEMORY;
24512 if (!value)
24513 value = &dummy;
24514
24515@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24516 void __iomem *virt_addr;
24517
24518 virt_addr = ioremap(phys_addr, width);
24519+ if (!virt_addr)
24520+ return AE_NO_MEMORY;
24521
24522 switch (width) {
24523 case 8:
24524diff -urNp linux-2.6.32.46/drivers/acpi/power_meter.c linux-2.6.32.46/drivers/acpi/power_meter.c
24525--- linux-2.6.32.46/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24526+++ linux-2.6.32.46/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24527@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24528 return res;
24529
24530 temp /= 1000;
24531- if (temp < 0)
24532- return -EINVAL;
24533
24534 mutex_lock(&resource->lock);
24535 resource->trip[attr->index - 7] = temp;
24536diff -urNp linux-2.6.32.46/drivers/acpi/proc.c linux-2.6.32.46/drivers/acpi/proc.c
24537--- linux-2.6.32.46/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24538+++ linux-2.6.32.46/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24539@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24540 size_t count, loff_t * ppos)
24541 {
24542 struct list_head *node, *next;
24543- char strbuf[5];
24544- char str[5] = "";
24545- unsigned int len = count;
24546+ char strbuf[5] = {0};
24547 struct acpi_device *found_dev = NULL;
24548
24549- if (len > 4)
24550- len = 4;
24551- if (len < 0)
24552- return -EFAULT;
24553+ if (count > 4)
24554+ count = 4;
24555
24556- if (copy_from_user(strbuf, buffer, len))
24557+ if (copy_from_user(strbuf, buffer, count))
24558 return -EFAULT;
24559- strbuf[len] = '\0';
24560- sscanf(strbuf, "%s", str);
24561+ strbuf[count] = '\0';
24562
24563 mutex_lock(&acpi_device_lock);
24564 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24565@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24566 if (!dev->wakeup.flags.valid)
24567 continue;
24568
24569- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24570+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24571 dev->wakeup.state.enabled =
24572 dev->wakeup.state.enabled ? 0 : 1;
24573 found_dev = dev;
24574diff -urNp linux-2.6.32.46/drivers/acpi/processor_core.c linux-2.6.32.46/drivers/acpi/processor_core.c
24575--- linux-2.6.32.46/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24576+++ linux-2.6.32.46/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24577@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24578 return 0;
24579 }
24580
24581- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24582+ BUG_ON(pr->id >= nr_cpu_ids);
24583
24584 /*
24585 * Buggy BIOS check
24586diff -urNp linux-2.6.32.46/drivers/acpi/sbshc.c linux-2.6.32.46/drivers/acpi/sbshc.c
24587--- linux-2.6.32.46/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24588+++ linux-2.6.32.46/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24589@@ -17,7 +17,7 @@
24590
24591 #define PREFIX "ACPI: "
24592
24593-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24594+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24595 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24596
24597 struct acpi_smb_hc {
24598diff -urNp linux-2.6.32.46/drivers/acpi/sleep.c linux-2.6.32.46/drivers/acpi/sleep.c
24599--- linux-2.6.32.46/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24600+++ linux-2.6.32.46/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24601@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24602 }
24603 }
24604
24605-static struct platform_suspend_ops acpi_suspend_ops = {
24606+static const struct platform_suspend_ops acpi_suspend_ops = {
24607 .valid = acpi_suspend_state_valid,
24608 .begin = acpi_suspend_begin,
24609 .prepare_late = acpi_pm_prepare,
24610@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24611 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24612 * been requested.
24613 */
24614-static struct platform_suspend_ops acpi_suspend_ops_old = {
24615+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24616 .valid = acpi_suspend_state_valid,
24617 .begin = acpi_suspend_begin_old,
24618 .prepare_late = acpi_pm_disable_gpes,
24619@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24620 acpi_enable_all_runtime_gpes();
24621 }
24622
24623-static struct platform_hibernation_ops acpi_hibernation_ops = {
24624+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24625 .begin = acpi_hibernation_begin,
24626 .end = acpi_pm_end,
24627 .pre_snapshot = acpi_hibernation_pre_snapshot,
24628@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24629 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24630 * been requested.
24631 */
24632-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24633+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24634 .begin = acpi_hibernation_begin_old,
24635 .end = acpi_pm_end,
24636 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24637diff -urNp linux-2.6.32.46/drivers/acpi/video.c linux-2.6.32.46/drivers/acpi/video.c
24638--- linux-2.6.32.46/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24639+++ linux-2.6.32.46/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24640@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24641 vd->brightness->levels[request_level]);
24642 }
24643
24644-static struct backlight_ops acpi_backlight_ops = {
24645+static const struct backlight_ops acpi_backlight_ops = {
24646 .get_brightness = acpi_video_get_brightness,
24647 .update_status = acpi_video_set_brightness,
24648 };
24649diff -urNp linux-2.6.32.46/drivers/ata/ahci.c linux-2.6.32.46/drivers/ata/ahci.c
24650--- linux-2.6.32.46/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24651+++ linux-2.6.32.46/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24652@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24653 .sdev_attrs = ahci_sdev_attrs,
24654 };
24655
24656-static struct ata_port_operations ahci_ops = {
24657+static const struct ata_port_operations ahci_ops = {
24658 .inherits = &sata_pmp_port_ops,
24659
24660 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24661@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24662 .port_stop = ahci_port_stop,
24663 };
24664
24665-static struct ata_port_operations ahci_vt8251_ops = {
24666+static const struct ata_port_operations ahci_vt8251_ops = {
24667 .inherits = &ahci_ops,
24668 .hardreset = ahci_vt8251_hardreset,
24669 };
24670
24671-static struct ata_port_operations ahci_p5wdh_ops = {
24672+static const struct ata_port_operations ahci_p5wdh_ops = {
24673 .inherits = &ahci_ops,
24674 .hardreset = ahci_p5wdh_hardreset,
24675 };
24676
24677-static struct ata_port_operations ahci_sb600_ops = {
24678+static const struct ata_port_operations ahci_sb600_ops = {
24679 .inherits = &ahci_ops,
24680 .softreset = ahci_sb600_softreset,
24681 .pmp_softreset = ahci_sb600_softreset,
24682diff -urNp linux-2.6.32.46/drivers/ata/ata_generic.c linux-2.6.32.46/drivers/ata/ata_generic.c
24683--- linux-2.6.32.46/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24684+++ linux-2.6.32.46/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24685@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24686 ATA_BMDMA_SHT(DRV_NAME),
24687 };
24688
24689-static struct ata_port_operations generic_port_ops = {
24690+static const struct ata_port_operations generic_port_ops = {
24691 .inherits = &ata_bmdma_port_ops,
24692 .cable_detect = ata_cable_unknown,
24693 .set_mode = generic_set_mode,
24694diff -urNp linux-2.6.32.46/drivers/ata/ata_piix.c linux-2.6.32.46/drivers/ata/ata_piix.c
24695--- linux-2.6.32.46/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24696+++ linux-2.6.32.46/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24697@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24698 ATA_BMDMA_SHT(DRV_NAME),
24699 };
24700
24701-static struct ata_port_operations piix_pata_ops = {
24702+static const struct ata_port_operations piix_pata_ops = {
24703 .inherits = &ata_bmdma32_port_ops,
24704 .cable_detect = ata_cable_40wire,
24705 .set_piomode = piix_set_piomode,
24706@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24707 .prereset = piix_pata_prereset,
24708 };
24709
24710-static struct ata_port_operations piix_vmw_ops = {
24711+static const struct ata_port_operations piix_vmw_ops = {
24712 .inherits = &piix_pata_ops,
24713 .bmdma_status = piix_vmw_bmdma_status,
24714 };
24715
24716-static struct ata_port_operations ich_pata_ops = {
24717+static const struct ata_port_operations ich_pata_ops = {
24718 .inherits = &piix_pata_ops,
24719 .cable_detect = ich_pata_cable_detect,
24720 .set_dmamode = ich_set_dmamode,
24721 };
24722
24723-static struct ata_port_operations piix_sata_ops = {
24724+static const struct ata_port_operations piix_sata_ops = {
24725 .inherits = &ata_bmdma_port_ops,
24726 };
24727
24728-static struct ata_port_operations piix_sidpr_sata_ops = {
24729+static const struct ata_port_operations piix_sidpr_sata_ops = {
24730 .inherits = &piix_sata_ops,
24731 .hardreset = sata_std_hardreset,
24732 .scr_read = piix_sidpr_scr_read,
24733diff -urNp linux-2.6.32.46/drivers/ata/libata-acpi.c linux-2.6.32.46/drivers/ata/libata-acpi.c
24734--- linux-2.6.32.46/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24735+++ linux-2.6.32.46/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24736@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24737 ata_acpi_uevent(dev->link->ap, dev, event);
24738 }
24739
24740-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24741+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24742 .handler = ata_acpi_dev_notify_dock,
24743 .uevent = ata_acpi_dev_uevent,
24744 };
24745
24746-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24747+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24748 .handler = ata_acpi_ap_notify_dock,
24749 .uevent = ata_acpi_ap_uevent,
24750 };
24751diff -urNp linux-2.6.32.46/drivers/ata/libata-core.c linux-2.6.32.46/drivers/ata/libata-core.c
24752--- linux-2.6.32.46/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24753+++ linux-2.6.32.46/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24754@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24755 struct ata_port *ap;
24756 unsigned int tag;
24757
24758- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24759+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24760 ap = qc->ap;
24761
24762 qc->flags = 0;
24763@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24764 struct ata_port *ap;
24765 struct ata_link *link;
24766
24767- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24768+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24769 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24770 ap = qc->ap;
24771 link = qc->dev->link;
24772@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24773 * LOCKING:
24774 * None.
24775 */
24776-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24777+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24778 {
24779 static DEFINE_SPINLOCK(lock);
24780 const struct ata_port_operations *cur;
24781@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24782 return;
24783
24784 spin_lock(&lock);
24785+ pax_open_kernel();
24786
24787 for (cur = ops->inherits; cur; cur = cur->inherits) {
24788 void **inherit = (void **)cur;
24789@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24790 if (IS_ERR(*pp))
24791 *pp = NULL;
24792
24793- ops->inherits = NULL;
24794+ *(struct ata_port_operations **)&ops->inherits = NULL;
24795
24796+ pax_close_kernel();
24797 spin_unlock(&lock);
24798 }
24799
24800@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24801 */
24802 /* KILLME - the only user left is ipr */
24803 void ata_host_init(struct ata_host *host, struct device *dev,
24804- unsigned long flags, struct ata_port_operations *ops)
24805+ unsigned long flags, const struct ata_port_operations *ops)
24806 {
24807 spin_lock_init(&host->lock);
24808 host->dev = dev;
24809@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24810 /* truly dummy */
24811 }
24812
24813-struct ata_port_operations ata_dummy_port_ops = {
24814+const struct ata_port_operations ata_dummy_port_ops = {
24815 .qc_prep = ata_noop_qc_prep,
24816 .qc_issue = ata_dummy_qc_issue,
24817 .error_handler = ata_dummy_error_handler,
24818diff -urNp linux-2.6.32.46/drivers/ata/libata-eh.c linux-2.6.32.46/drivers/ata/libata-eh.c
24819--- linux-2.6.32.46/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24820+++ linux-2.6.32.46/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24821@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24822 {
24823 struct ata_link *link;
24824
24825+ pax_track_stack();
24826+
24827 ata_for_each_link(link, ap, HOST_FIRST)
24828 ata_eh_link_report(link);
24829 }
24830@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24831 */
24832 void ata_std_error_handler(struct ata_port *ap)
24833 {
24834- struct ata_port_operations *ops = ap->ops;
24835+ const struct ata_port_operations *ops = ap->ops;
24836 ata_reset_fn_t hardreset = ops->hardreset;
24837
24838 /* ignore built-in hardreset if SCR access is not available */
24839diff -urNp linux-2.6.32.46/drivers/ata/libata-pmp.c linux-2.6.32.46/drivers/ata/libata-pmp.c
24840--- linux-2.6.32.46/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24841+++ linux-2.6.32.46/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24842@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24843 */
24844 static int sata_pmp_eh_recover(struct ata_port *ap)
24845 {
24846- struct ata_port_operations *ops = ap->ops;
24847+ const struct ata_port_operations *ops = ap->ops;
24848 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24849 struct ata_link *pmp_link = &ap->link;
24850 struct ata_device *pmp_dev = pmp_link->device;
24851diff -urNp linux-2.6.32.46/drivers/ata/pata_acpi.c linux-2.6.32.46/drivers/ata/pata_acpi.c
24852--- linux-2.6.32.46/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24853+++ linux-2.6.32.46/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24854@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24855 ATA_BMDMA_SHT(DRV_NAME),
24856 };
24857
24858-static struct ata_port_operations pacpi_ops = {
24859+static const struct ata_port_operations pacpi_ops = {
24860 .inherits = &ata_bmdma_port_ops,
24861 .qc_issue = pacpi_qc_issue,
24862 .cable_detect = pacpi_cable_detect,
24863diff -urNp linux-2.6.32.46/drivers/ata/pata_ali.c linux-2.6.32.46/drivers/ata/pata_ali.c
24864--- linux-2.6.32.46/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24865+++ linux-2.6.32.46/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24866@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24867 * Port operations for PIO only ALi
24868 */
24869
24870-static struct ata_port_operations ali_early_port_ops = {
24871+static const struct ata_port_operations ali_early_port_ops = {
24872 .inherits = &ata_sff_port_ops,
24873 .cable_detect = ata_cable_40wire,
24874 .set_piomode = ali_set_piomode,
24875@@ -382,7 +382,7 @@ static const struct ata_port_operations
24876 * Port operations for DMA capable ALi without cable
24877 * detect
24878 */
24879-static struct ata_port_operations ali_20_port_ops = {
24880+static const struct ata_port_operations ali_20_port_ops = {
24881 .inherits = &ali_dma_base_ops,
24882 .cable_detect = ata_cable_40wire,
24883 .mode_filter = ali_20_filter,
24884@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24885 /*
24886 * Port operations for DMA capable ALi with cable detect
24887 */
24888-static struct ata_port_operations ali_c2_port_ops = {
24889+static const struct ata_port_operations ali_c2_port_ops = {
24890 .inherits = &ali_dma_base_ops,
24891 .check_atapi_dma = ali_check_atapi_dma,
24892 .cable_detect = ali_c2_cable_detect,
24893@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24894 /*
24895 * Port operations for DMA capable ALi with cable detect
24896 */
24897-static struct ata_port_operations ali_c4_port_ops = {
24898+static const struct ata_port_operations ali_c4_port_ops = {
24899 .inherits = &ali_dma_base_ops,
24900 .check_atapi_dma = ali_check_atapi_dma,
24901 .cable_detect = ali_c2_cable_detect,
24902@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24903 /*
24904 * Port operations for DMA capable ALi with cable detect and LBA48
24905 */
24906-static struct ata_port_operations ali_c5_port_ops = {
24907+static const struct ata_port_operations ali_c5_port_ops = {
24908 .inherits = &ali_dma_base_ops,
24909 .check_atapi_dma = ali_check_atapi_dma,
24910 .dev_config = ali_warn_atapi_dma,
24911diff -urNp linux-2.6.32.46/drivers/ata/pata_amd.c linux-2.6.32.46/drivers/ata/pata_amd.c
24912--- linux-2.6.32.46/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24913+++ linux-2.6.32.46/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24914@@ -397,28 +397,28 @@ static const struct ata_port_operations
24915 .prereset = amd_pre_reset,
24916 };
24917
24918-static struct ata_port_operations amd33_port_ops = {
24919+static const struct ata_port_operations amd33_port_ops = {
24920 .inherits = &amd_base_port_ops,
24921 .cable_detect = ata_cable_40wire,
24922 .set_piomode = amd33_set_piomode,
24923 .set_dmamode = amd33_set_dmamode,
24924 };
24925
24926-static struct ata_port_operations amd66_port_ops = {
24927+static const struct ata_port_operations amd66_port_ops = {
24928 .inherits = &amd_base_port_ops,
24929 .cable_detect = ata_cable_unknown,
24930 .set_piomode = amd66_set_piomode,
24931 .set_dmamode = amd66_set_dmamode,
24932 };
24933
24934-static struct ata_port_operations amd100_port_ops = {
24935+static const struct ata_port_operations amd100_port_ops = {
24936 .inherits = &amd_base_port_ops,
24937 .cable_detect = ata_cable_unknown,
24938 .set_piomode = amd100_set_piomode,
24939 .set_dmamode = amd100_set_dmamode,
24940 };
24941
24942-static struct ata_port_operations amd133_port_ops = {
24943+static const struct ata_port_operations amd133_port_ops = {
24944 .inherits = &amd_base_port_ops,
24945 .cable_detect = amd_cable_detect,
24946 .set_piomode = amd133_set_piomode,
24947@@ -433,13 +433,13 @@ static const struct ata_port_operations
24948 .host_stop = nv_host_stop,
24949 };
24950
24951-static struct ata_port_operations nv100_port_ops = {
24952+static const struct ata_port_operations nv100_port_ops = {
24953 .inherits = &nv_base_port_ops,
24954 .set_piomode = nv100_set_piomode,
24955 .set_dmamode = nv100_set_dmamode,
24956 };
24957
24958-static struct ata_port_operations nv133_port_ops = {
24959+static const struct ata_port_operations nv133_port_ops = {
24960 .inherits = &nv_base_port_ops,
24961 .set_piomode = nv133_set_piomode,
24962 .set_dmamode = nv133_set_dmamode,
24963diff -urNp linux-2.6.32.46/drivers/ata/pata_artop.c linux-2.6.32.46/drivers/ata/pata_artop.c
24964--- linux-2.6.32.46/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24965+++ linux-2.6.32.46/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24966@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24967 ATA_BMDMA_SHT(DRV_NAME),
24968 };
24969
24970-static struct ata_port_operations artop6210_ops = {
24971+static const struct ata_port_operations artop6210_ops = {
24972 .inherits = &ata_bmdma_port_ops,
24973 .cable_detect = ata_cable_40wire,
24974 .set_piomode = artop6210_set_piomode,
24975@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24976 .qc_defer = artop6210_qc_defer,
24977 };
24978
24979-static struct ata_port_operations artop6260_ops = {
24980+static const struct ata_port_operations artop6260_ops = {
24981 .inherits = &ata_bmdma_port_ops,
24982 .cable_detect = artop6260_cable_detect,
24983 .set_piomode = artop6260_set_piomode,
24984diff -urNp linux-2.6.32.46/drivers/ata/pata_at32.c linux-2.6.32.46/drivers/ata/pata_at32.c
24985--- linux-2.6.32.46/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24986+++ linux-2.6.32.46/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24987@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24988 ATA_PIO_SHT(DRV_NAME),
24989 };
24990
24991-static struct ata_port_operations at32_port_ops = {
24992+static const struct ata_port_operations at32_port_ops = {
24993 .inherits = &ata_sff_port_ops,
24994 .cable_detect = ata_cable_40wire,
24995 .set_piomode = pata_at32_set_piomode,
24996diff -urNp linux-2.6.32.46/drivers/ata/pata_at91.c linux-2.6.32.46/drivers/ata/pata_at91.c
24997--- linux-2.6.32.46/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24998+++ linux-2.6.32.46/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24999@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
25000 ATA_PIO_SHT(DRV_NAME),
25001 };
25002
25003-static struct ata_port_operations pata_at91_port_ops = {
25004+static const struct ata_port_operations pata_at91_port_ops = {
25005 .inherits = &ata_sff_port_ops,
25006
25007 .sff_data_xfer = pata_at91_data_xfer_noirq,
25008diff -urNp linux-2.6.32.46/drivers/ata/pata_atiixp.c linux-2.6.32.46/drivers/ata/pata_atiixp.c
25009--- linux-2.6.32.46/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
25010+++ linux-2.6.32.46/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
25011@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
25012 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25013 };
25014
25015-static struct ata_port_operations atiixp_port_ops = {
25016+static const struct ata_port_operations atiixp_port_ops = {
25017 .inherits = &ata_bmdma_port_ops,
25018
25019 .qc_prep = ata_sff_dumb_qc_prep,
25020diff -urNp linux-2.6.32.46/drivers/ata/pata_atp867x.c linux-2.6.32.46/drivers/ata/pata_atp867x.c
25021--- linux-2.6.32.46/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
25022+++ linux-2.6.32.46/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
25023@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
25024 ATA_BMDMA_SHT(DRV_NAME),
25025 };
25026
25027-static struct ata_port_operations atp867x_ops = {
25028+static const struct ata_port_operations atp867x_ops = {
25029 .inherits = &ata_bmdma_port_ops,
25030 .cable_detect = atp867x_cable_detect,
25031 .set_piomode = atp867x_set_piomode,
25032diff -urNp linux-2.6.32.46/drivers/ata/pata_bf54x.c linux-2.6.32.46/drivers/ata/pata_bf54x.c
25033--- linux-2.6.32.46/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
25034+++ linux-2.6.32.46/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
25035@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
25036 .dma_boundary = ATA_DMA_BOUNDARY,
25037 };
25038
25039-static struct ata_port_operations bfin_pata_ops = {
25040+static const struct ata_port_operations bfin_pata_ops = {
25041 .inherits = &ata_sff_port_ops,
25042
25043 .set_piomode = bfin_set_piomode,
25044diff -urNp linux-2.6.32.46/drivers/ata/pata_cmd640.c linux-2.6.32.46/drivers/ata/pata_cmd640.c
25045--- linux-2.6.32.46/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
25046+++ linux-2.6.32.46/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
25047@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
25048 ATA_BMDMA_SHT(DRV_NAME),
25049 };
25050
25051-static struct ata_port_operations cmd640_port_ops = {
25052+static const struct ata_port_operations cmd640_port_ops = {
25053 .inherits = &ata_bmdma_port_ops,
25054 /* In theory xfer_noirq is not needed once we kill the prefetcher */
25055 .sff_data_xfer = ata_sff_data_xfer_noirq,
25056diff -urNp linux-2.6.32.46/drivers/ata/pata_cmd64x.c linux-2.6.32.46/drivers/ata/pata_cmd64x.c
25057--- linux-2.6.32.46/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
25058+++ linux-2.6.32.46/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
25059@@ -271,18 +271,18 @@ static const struct ata_port_operations
25060 .set_dmamode = cmd64x_set_dmamode,
25061 };
25062
25063-static struct ata_port_operations cmd64x_port_ops = {
25064+static const struct ata_port_operations cmd64x_port_ops = {
25065 .inherits = &cmd64x_base_ops,
25066 .cable_detect = ata_cable_40wire,
25067 };
25068
25069-static struct ata_port_operations cmd646r1_port_ops = {
25070+static const struct ata_port_operations cmd646r1_port_ops = {
25071 .inherits = &cmd64x_base_ops,
25072 .bmdma_stop = cmd646r1_bmdma_stop,
25073 .cable_detect = ata_cable_40wire,
25074 };
25075
25076-static struct ata_port_operations cmd648_port_ops = {
25077+static const struct ata_port_operations cmd648_port_ops = {
25078 .inherits = &cmd64x_base_ops,
25079 .bmdma_stop = cmd648_bmdma_stop,
25080 .cable_detect = cmd648_cable_detect,
25081diff -urNp linux-2.6.32.46/drivers/ata/pata_cs5520.c linux-2.6.32.46/drivers/ata/pata_cs5520.c
25082--- linux-2.6.32.46/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
25083+++ linux-2.6.32.46/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
25084@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
25085 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25086 };
25087
25088-static struct ata_port_operations cs5520_port_ops = {
25089+static const struct ata_port_operations cs5520_port_ops = {
25090 .inherits = &ata_bmdma_port_ops,
25091 .qc_prep = ata_sff_dumb_qc_prep,
25092 .cable_detect = ata_cable_40wire,
25093diff -urNp linux-2.6.32.46/drivers/ata/pata_cs5530.c linux-2.6.32.46/drivers/ata/pata_cs5530.c
25094--- linux-2.6.32.46/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
25095+++ linux-2.6.32.46/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
25096@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
25097 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25098 };
25099
25100-static struct ata_port_operations cs5530_port_ops = {
25101+static const struct ata_port_operations cs5530_port_ops = {
25102 .inherits = &ata_bmdma_port_ops,
25103
25104 .qc_prep = ata_sff_dumb_qc_prep,
25105diff -urNp linux-2.6.32.46/drivers/ata/pata_cs5535.c linux-2.6.32.46/drivers/ata/pata_cs5535.c
25106--- linux-2.6.32.46/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
25107+++ linux-2.6.32.46/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
25108@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
25109 ATA_BMDMA_SHT(DRV_NAME),
25110 };
25111
25112-static struct ata_port_operations cs5535_port_ops = {
25113+static const struct ata_port_operations cs5535_port_ops = {
25114 .inherits = &ata_bmdma_port_ops,
25115 .cable_detect = cs5535_cable_detect,
25116 .set_piomode = cs5535_set_piomode,
25117diff -urNp linux-2.6.32.46/drivers/ata/pata_cs5536.c linux-2.6.32.46/drivers/ata/pata_cs5536.c
25118--- linux-2.6.32.46/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
25119+++ linux-2.6.32.46/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
25120@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
25121 ATA_BMDMA_SHT(DRV_NAME),
25122 };
25123
25124-static struct ata_port_operations cs5536_port_ops = {
25125+static const struct ata_port_operations cs5536_port_ops = {
25126 .inherits = &ata_bmdma_port_ops,
25127 .cable_detect = cs5536_cable_detect,
25128 .set_piomode = cs5536_set_piomode,
25129diff -urNp linux-2.6.32.46/drivers/ata/pata_cypress.c linux-2.6.32.46/drivers/ata/pata_cypress.c
25130--- linux-2.6.32.46/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
25131+++ linux-2.6.32.46/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
25132@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25133 ATA_BMDMA_SHT(DRV_NAME),
25134 };
25135
25136-static struct ata_port_operations cy82c693_port_ops = {
25137+static const struct ata_port_operations cy82c693_port_ops = {
25138 .inherits = &ata_bmdma_port_ops,
25139 .cable_detect = ata_cable_40wire,
25140 .set_piomode = cy82c693_set_piomode,
25141diff -urNp linux-2.6.32.46/drivers/ata/pata_efar.c linux-2.6.32.46/drivers/ata/pata_efar.c
25142--- linux-2.6.32.46/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
25143+++ linux-2.6.32.46/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
25144@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25145 ATA_BMDMA_SHT(DRV_NAME),
25146 };
25147
25148-static struct ata_port_operations efar_ops = {
25149+static const struct ata_port_operations efar_ops = {
25150 .inherits = &ata_bmdma_port_ops,
25151 .cable_detect = efar_cable_detect,
25152 .set_piomode = efar_set_piomode,
25153diff -urNp linux-2.6.32.46/drivers/ata/pata_hpt366.c linux-2.6.32.46/drivers/ata/pata_hpt366.c
25154--- linux-2.6.32.46/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25155+++ linux-2.6.32.46/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25156@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25157 * Configuration for HPT366/68
25158 */
25159
25160-static struct ata_port_operations hpt366_port_ops = {
25161+static const struct ata_port_operations hpt366_port_ops = {
25162 .inherits = &ata_bmdma_port_ops,
25163 .cable_detect = hpt36x_cable_detect,
25164 .mode_filter = hpt366_filter,
25165diff -urNp linux-2.6.32.46/drivers/ata/pata_hpt37x.c linux-2.6.32.46/drivers/ata/pata_hpt37x.c
25166--- linux-2.6.32.46/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25167+++ linux-2.6.32.46/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25168@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25169 * Configuration for HPT370
25170 */
25171
25172-static struct ata_port_operations hpt370_port_ops = {
25173+static const struct ata_port_operations hpt370_port_ops = {
25174 .inherits = &ata_bmdma_port_ops,
25175
25176 .bmdma_stop = hpt370_bmdma_stop,
25177@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25178 * Configuration for HPT370A. Close to 370 but less filters
25179 */
25180
25181-static struct ata_port_operations hpt370a_port_ops = {
25182+static const struct ata_port_operations hpt370a_port_ops = {
25183 .inherits = &hpt370_port_ops,
25184 .mode_filter = hpt370a_filter,
25185 };
25186@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25187 * and DMA mode setting functionality.
25188 */
25189
25190-static struct ata_port_operations hpt372_port_ops = {
25191+static const struct ata_port_operations hpt372_port_ops = {
25192 .inherits = &ata_bmdma_port_ops,
25193
25194 .bmdma_stop = hpt37x_bmdma_stop,
25195@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25196 * but we have a different cable detection procedure for function 1.
25197 */
25198
25199-static struct ata_port_operations hpt374_fn1_port_ops = {
25200+static const struct ata_port_operations hpt374_fn1_port_ops = {
25201 .inherits = &hpt372_port_ops,
25202 .prereset = hpt374_fn1_pre_reset,
25203 };
25204diff -urNp linux-2.6.32.46/drivers/ata/pata_hpt3x2n.c linux-2.6.32.46/drivers/ata/pata_hpt3x2n.c
25205--- linux-2.6.32.46/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25206+++ linux-2.6.32.46/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25207@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25208 * Configuration for HPT3x2n.
25209 */
25210
25211-static struct ata_port_operations hpt3x2n_port_ops = {
25212+static const struct ata_port_operations hpt3x2n_port_ops = {
25213 .inherits = &ata_bmdma_port_ops,
25214
25215 .bmdma_stop = hpt3x2n_bmdma_stop,
25216diff -urNp linux-2.6.32.46/drivers/ata/pata_hpt3x3.c linux-2.6.32.46/drivers/ata/pata_hpt3x3.c
25217--- linux-2.6.32.46/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25218+++ linux-2.6.32.46/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25219@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25220 ATA_BMDMA_SHT(DRV_NAME),
25221 };
25222
25223-static struct ata_port_operations hpt3x3_port_ops = {
25224+static const struct ata_port_operations hpt3x3_port_ops = {
25225 .inherits = &ata_bmdma_port_ops,
25226 .cable_detect = ata_cable_40wire,
25227 .set_piomode = hpt3x3_set_piomode,
25228diff -urNp linux-2.6.32.46/drivers/ata/pata_icside.c linux-2.6.32.46/drivers/ata/pata_icside.c
25229--- linux-2.6.32.46/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25230+++ linux-2.6.32.46/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25231@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25232 }
25233 }
25234
25235-static struct ata_port_operations pata_icside_port_ops = {
25236+static const struct ata_port_operations pata_icside_port_ops = {
25237 .inherits = &ata_sff_port_ops,
25238 /* no need to build any PRD tables for DMA */
25239 .qc_prep = ata_noop_qc_prep,
25240diff -urNp linux-2.6.32.46/drivers/ata/pata_isapnp.c linux-2.6.32.46/drivers/ata/pata_isapnp.c
25241--- linux-2.6.32.46/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25242+++ linux-2.6.32.46/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25243@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25244 ATA_PIO_SHT(DRV_NAME),
25245 };
25246
25247-static struct ata_port_operations isapnp_port_ops = {
25248+static const struct ata_port_operations isapnp_port_ops = {
25249 .inherits = &ata_sff_port_ops,
25250 .cable_detect = ata_cable_40wire,
25251 };
25252
25253-static struct ata_port_operations isapnp_noalt_port_ops = {
25254+static const struct ata_port_operations isapnp_noalt_port_ops = {
25255 .inherits = &ata_sff_port_ops,
25256 .cable_detect = ata_cable_40wire,
25257 /* No altstatus so we don't want to use the lost interrupt poll */
25258diff -urNp linux-2.6.32.46/drivers/ata/pata_it8213.c linux-2.6.32.46/drivers/ata/pata_it8213.c
25259--- linux-2.6.32.46/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25260+++ linux-2.6.32.46/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25261@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25262 };
25263
25264
25265-static struct ata_port_operations it8213_ops = {
25266+static const struct ata_port_operations it8213_ops = {
25267 .inherits = &ata_bmdma_port_ops,
25268 .cable_detect = it8213_cable_detect,
25269 .set_piomode = it8213_set_piomode,
25270diff -urNp linux-2.6.32.46/drivers/ata/pata_it821x.c linux-2.6.32.46/drivers/ata/pata_it821x.c
25271--- linux-2.6.32.46/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25272+++ linux-2.6.32.46/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25273@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25274 ATA_BMDMA_SHT(DRV_NAME),
25275 };
25276
25277-static struct ata_port_operations it821x_smart_port_ops = {
25278+static const struct ata_port_operations it821x_smart_port_ops = {
25279 .inherits = &ata_bmdma_port_ops,
25280
25281 .check_atapi_dma= it821x_check_atapi_dma,
25282@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25283 .port_start = it821x_port_start,
25284 };
25285
25286-static struct ata_port_operations it821x_passthru_port_ops = {
25287+static const struct ata_port_operations it821x_passthru_port_ops = {
25288 .inherits = &ata_bmdma_port_ops,
25289
25290 .check_atapi_dma= it821x_check_atapi_dma,
25291@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25292 .port_start = it821x_port_start,
25293 };
25294
25295-static struct ata_port_operations it821x_rdc_port_ops = {
25296+static const struct ata_port_operations it821x_rdc_port_ops = {
25297 .inherits = &ata_bmdma_port_ops,
25298
25299 .check_atapi_dma= it821x_check_atapi_dma,
25300diff -urNp linux-2.6.32.46/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.46/drivers/ata/pata_ixp4xx_cf.c
25301--- linux-2.6.32.46/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25302+++ linux-2.6.32.46/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25303@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25304 ATA_PIO_SHT(DRV_NAME),
25305 };
25306
25307-static struct ata_port_operations ixp4xx_port_ops = {
25308+static const struct ata_port_operations ixp4xx_port_ops = {
25309 .inherits = &ata_sff_port_ops,
25310 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25311 .cable_detect = ata_cable_40wire,
25312diff -urNp linux-2.6.32.46/drivers/ata/pata_jmicron.c linux-2.6.32.46/drivers/ata/pata_jmicron.c
25313--- linux-2.6.32.46/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25314+++ linux-2.6.32.46/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25315@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25316 ATA_BMDMA_SHT(DRV_NAME),
25317 };
25318
25319-static struct ata_port_operations jmicron_ops = {
25320+static const struct ata_port_operations jmicron_ops = {
25321 .inherits = &ata_bmdma_port_ops,
25322 .prereset = jmicron_pre_reset,
25323 };
25324diff -urNp linux-2.6.32.46/drivers/ata/pata_legacy.c linux-2.6.32.46/drivers/ata/pata_legacy.c
25325--- linux-2.6.32.46/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25326+++ linux-2.6.32.46/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25327@@ -106,7 +106,7 @@ struct legacy_probe {
25328
25329 struct legacy_controller {
25330 const char *name;
25331- struct ata_port_operations *ops;
25332+ const struct ata_port_operations *ops;
25333 unsigned int pio_mask;
25334 unsigned int flags;
25335 unsigned int pflags;
25336@@ -223,12 +223,12 @@ static const struct ata_port_operations
25337 * pio_mask as well.
25338 */
25339
25340-static struct ata_port_operations simple_port_ops = {
25341+static const struct ata_port_operations simple_port_ops = {
25342 .inherits = &legacy_base_port_ops,
25343 .sff_data_xfer = ata_sff_data_xfer_noirq,
25344 };
25345
25346-static struct ata_port_operations legacy_port_ops = {
25347+static const struct ata_port_operations legacy_port_ops = {
25348 .inherits = &legacy_base_port_ops,
25349 .sff_data_xfer = ata_sff_data_xfer_noirq,
25350 .set_mode = legacy_set_mode,
25351@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25352 return buflen;
25353 }
25354
25355-static struct ata_port_operations pdc20230_port_ops = {
25356+static const struct ata_port_operations pdc20230_port_ops = {
25357 .inherits = &legacy_base_port_ops,
25358 .set_piomode = pdc20230_set_piomode,
25359 .sff_data_xfer = pdc_data_xfer_vlb,
25360@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25361 ioread8(ap->ioaddr.status_addr);
25362 }
25363
25364-static struct ata_port_operations ht6560a_port_ops = {
25365+static const struct ata_port_operations ht6560a_port_ops = {
25366 .inherits = &legacy_base_port_ops,
25367 .set_piomode = ht6560a_set_piomode,
25368 };
25369@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25370 ioread8(ap->ioaddr.status_addr);
25371 }
25372
25373-static struct ata_port_operations ht6560b_port_ops = {
25374+static const struct ata_port_operations ht6560b_port_ops = {
25375 .inherits = &legacy_base_port_ops,
25376 .set_piomode = ht6560b_set_piomode,
25377 };
25378@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25379 }
25380
25381
25382-static struct ata_port_operations opti82c611a_port_ops = {
25383+static const struct ata_port_operations opti82c611a_port_ops = {
25384 .inherits = &legacy_base_port_ops,
25385 .set_piomode = opti82c611a_set_piomode,
25386 };
25387@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25388 return ata_sff_qc_issue(qc);
25389 }
25390
25391-static struct ata_port_operations opti82c46x_port_ops = {
25392+static const struct ata_port_operations opti82c46x_port_ops = {
25393 .inherits = &legacy_base_port_ops,
25394 .set_piomode = opti82c46x_set_piomode,
25395 .qc_issue = opti82c46x_qc_issue,
25396@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25397 return 0;
25398 }
25399
25400-static struct ata_port_operations qdi6500_port_ops = {
25401+static const struct ata_port_operations qdi6500_port_ops = {
25402 .inherits = &legacy_base_port_ops,
25403 .set_piomode = qdi6500_set_piomode,
25404 .qc_issue = qdi_qc_issue,
25405 .sff_data_xfer = vlb32_data_xfer,
25406 };
25407
25408-static struct ata_port_operations qdi6580_port_ops = {
25409+static const struct ata_port_operations qdi6580_port_ops = {
25410 .inherits = &legacy_base_port_ops,
25411 .set_piomode = qdi6580_set_piomode,
25412 .sff_data_xfer = vlb32_data_xfer,
25413 };
25414
25415-static struct ata_port_operations qdi6580dp_port_ops = {
25416+static const struct ata_port_operations qdi6580dp_port_ops = {
25417 .inherits = &legacy_base_port_ops,
25418 .set_piomode = qdi6580dp_set_piomode,
25419 .sff_data_xfer = vlb32_data_xfer,
25420@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25421 return 0;
25422 }
25423
25424-static struct ata_port_operations winbond_port_ops = {
25425+static const struct ata_port_operations winbond_port_ops = {
25426 .inherits = &legacy_base_port_ops,
25427 .set_piomode = winbond_set_piomode,
25428 .sff_data_xfer = vlb32_data_xfer,
25429@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25430 int pio_modes = controller->pio_mask;
25431 unsigned long io = probe->port;
25432 u32 mask = (1 << probe->slot);
25433- struct ata_port_operations *ops = controller->ops;
25434+ const struct ata_port_operations *ops = controller->ops;
25435 struct legacy_data *ld = &legacy_data[probe->slot];
25436 struct ata_host *host = NULL;
25437 struct ata_port *ap;
25438diff -urNp linux-2.6.32.46/drivers/ata/pata_marvell.c linux-2.6.32.46/drivers/ata/pata_marvell.c
25439--- linux-2.6.32.46/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25440+++ linux-2.6.32.46/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25441@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25442 ATA_BMDMA_SHT(DRV_NAME),
25443 };
25444
25445-static struct ata_port_operations marvell_ops = {
25446+static const struct ata_port_operations marvell_ops = {
25447 .inherits = &ata_bmdma_port_ops,
25448 .cable_detect = marvell_cable_detect,
25449 .prereset = marvell_pre_reset,
25450diff -urNp linux-2.6.32.46/drivers/ata/pata_mpc52xx.c linux-2.6.32.46/drivers/ata/pata_mpc52xx.c
25451--- linux-2.6.32.46/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25452+++ linux-2.6.32.46/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25453@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25454 ATA_PIO_SHT(DRV_NAME),
25455 };
25456
25457-static struct ata_port_operations mpc52xx_ata_port_ops = {
25458+static const struct ata_port_operations mpc52xx_ata_port_ops = {
25459 .inherits = &ata_bmdma_port_ops,
25460 .sff_dev_select = mpc52xx_ata_dev_select,
25461 .set_piomode = mpc52xx_ata_set_piomode,
25462diff -urNp linux-2.6.32.46/drivers/ata/pata_mpiix.c linux-2.6.32.46/drivers/ata/pata_mpiix.c
25463--- linux-2.6.32.46/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25464+++ linux-2.6.32.46/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25465@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25466 ATA_PIO_SHT(DRV_NAME),
25467 };
25468
25469-static struct ata_port_operations mpiix_port_ops = {
25470+static const struct ata_port_operations mpiix_port_ops = {
25471 .inherits = &ata_sff_port_ops,
25472 .qc_issue = mpiix_qc_issue,
25473 .cable_detect = ata_cable_40wire,
25474diff -urNp linux-2.6.32.46/drivers/ata/pata_netcell.c linux-2.6.32.46/drivers/ata/pata_netcell.c
25475--- linux-2.6.32.46/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25476+++ linux-2.6.32.46/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25477@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25478 ATA_BMDMA_SHT(DRV_NAME),
25479 };
25480
25481-static struct ata_port_operations netcell_ops = {
25482+static const struct ata_port_operations netcell_ops = {
25483 .inherits = &ata_bmdma_port_ops,
25484 .cable_detect = ata_cable_80wire,
25485 .read_id = netcell_read_id,
25486diff -urNp linux-2.6.32.46/drivers/ata/pata_ninja32.c linux-2.6.32.46/drivers/ata/pata_ninja32.c
25487--- linux-2.6.32.46/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25488+++ linux-2.6.32.46/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25489@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25490 ATA_BMDMA_SHT(DRV_NAME),
25491 };
25492
25493-static struct ata_port_operations ninja32_port_ops = {
25494+static const struct ata_port_operations ninja32_port_ops = {
25495 .inherits = &ata_bmdma_port_ops,
25496 .sff_dev_select = ninja32_dev_select,
25497 .cable_detect = ata_cable_40wire,
25498diff -urNp linux-2.6.32.46/drivers/ata/pata_ns87410.c linux-2.6.32.46/drivers/ata/pata_ns87410.c
25499--- linux-2.6.32.46/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25500+++ linux-2.6.32.46/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25501@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25502 ATA_PIO_SHT(DRV_NAME),
25503 };
25504
25505-static struct ata_port_operations ns87410_port_ops = {
25506+static const struct ata_port_operations ns87410_port_ops = {
25507 .inherits = &ata_sff_port_ops,
25508 .qc_issue = ns87410_qc_issue,
25509 .cable_detect = ata_cable_40wire,
25510diff -urNp linux-2.6.32.46/drivers/ata/pata_ns87415.c linux-2.6.32.46/drivers/ata/pata_ns87415.c
25511--- linux-2.6.32.46/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25512+++ linux-2.6.32.46/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25513@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25514 }
25515 #endif /* 87560 SuperIO Support */
25516
25517-static struct ata_port_operations ns87415_pata_ops = {
25518+static const struct ata_port_operations ns87415_pata_ops = {
25519 .inherits = &ata_bmdma_port_ops,
25520
25521 .check_atapi_dma = ns87415_check_atapi_dma,
25522@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25523 };
25524
25525 #if defined(CONFIG_SUPERIO)
25526-static struct ata_port_operations ns87560_pata_ops = {
25527+static const struct ata_port_operations ns87560_pata_ops = {
25528 .inherits = &ns87415_pata_ops,
25529 .sff_tf_read = ns87560_tf_read,
25530 .sff_check_status = ns87560_check_status,
25531diff -urNp linux-2.6.32.46/drivers/ata/pata_octeon_cf.c linux-2.6.32.46/drivers/ata/pata_octeon_cf.c
25532--- linux-2.6.32.46/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25533+++ linux-2.6.32.46/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25534@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25535 return 0;
25536 }
25537
25538+/* cannot be const */
25539 static struct ata_port_operations octeon_cf_ops = {
25540 .inherits = &ata_sff_port_ops,
25541 .check_atapi_dma = octeon_cf_check_atapi_dma,
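
pata_octeon_cf.c is the one driver in this stretch that keeps a writable table; the added /* cannot be const */ comment flags it, presumably because the driver adjusts entries in this table at probe time depending on the hardware it finds. A hedged sketch of that situation, with hypothetical bar_* names rather than the real octeon code:

#include <linux/libata.h>
#include <linux/platform_device.h>

/* hypothetical: program DMA timings for one device */
static void bar_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
}

/* cannot be const: bar_probe() patches in .set_dmamode when DMA is usable */
static struct ata_port_operations bar_port_ops = {
	.inherits	= &ata_sff_port_ops,
};

static int __devinit bar_probe(struct platform_device *pdev)
{
	int have_dma = pdev->num_resources > 1;	/* stand-in for a real capability check */

	if (have_dma)
		bar_port_ops.set_dmamode = bar_set_dmamode;
	return 0;
}
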
25542diff -urNp linux-2.6.32.46/drivers/ata/pata_oldpiix.c linux-2.6.32.46/drivers/ata/pata_oldpiix.c
25543--- linux-2.6.32.46/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25544+++ linux-2.6.32.46/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25545@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25546 ATA_BMDMA_SHT(DRV_NAME),
25547 };
25548
25549-static struct ata_port_operations oldpiix_pata_ops = {
25550+static const struct ata_port_operations oldpiix_pata_ops = {
25551 .inherits = &ata_bmdma_port_ops,
25552 .qc_issue = oldpiix_qc_issue,
25553 .cable_detect = ata_cable_40wire,
25554diff -urNp linux-2.6.32.46/drivers/ata/pata_opti.c linux-2.6.32.46/drivers/ata/pata_opti.c
25555--- linux-2.6.32.46/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25556+++ linux-2.6.32.46/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25557@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25558 ATA_PIO_SHT(DRV_NAME),
25559 };
25560
25561-static struct ata_port_operations opti_port_ops = {
25562+static const struct ata_port_operations opti_port_ops = {
25563 .inherits = &ata_sff_port_ops,
25564 .cable_detect = ata_cable_40wire,
25565 .set_piomode = opti_set_piomode,
25566diff -urNp linux-2.6.32.46/drivers/ata/pata_optidma.c linux-2.6.32.46/drivers/ata/pata_optidma.c
25567--- linux-2.6.32.46/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25568+++ linux-2.6.32.46/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25569@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25570 ATA_BMDMA_SHT(DRV_NAME),
25571 };
25572
25573-static struct ata_port_operations optidma_port_ops = {
25574+static const struct ata_port_operations optidma_port_ops = {
25575 .inherits = &ata_bmdma_port_ops,
25576 .cable_detect = ata_cable_40wire,
25577 .set_piomode = optidma_set_pio_mode,
25578@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25579 .prereset = optidma_pre_reset,
25580 };
25581
25582-static struct ata_port_operations optiplus_port_ops = {
25583+static const struct ata_port_operations optiplus_port_ops = {
25584 .inherits = &optidma_port_ops,
25585 .set_piomode = optiplus_set_pio_mode,
25586 .set_dmamode = optiplus_set_dma_mode,
25587diff -urNp linux-2.6.32.46/drivers/ata/pata_palmld.c linux-2.6.32.46/drivers/ata/pata_palmld.c
25588--- linux-2.6.32.46/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25589+++ linux-2.6.32.46/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25590@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25591 ATA_PIO_SHT(DRV_NAME),
25592 };
25593
25594-static struct ata_port_operations palmld_port_ops = {
25595+static const struct ata_port_operations palmld_port_ops = {
25596 .inherits = &ata_sff_port_ops,
25597 .sff_data_xfer = ata_sff_data_xfer_noirq,
25598 .cable_detect = ata_cable_40wire,
25599diff -urNp linux-2.6.32.46/drivers/ata/pata_pcmcia.c linux-2.6.32.46/drivers/ata/pata_pcmcia.c
25600--- linux-2.6.32.46/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25601+++ linux-2.6.32.46/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25602@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25603 ATA_PIO_SHT(DRV_NAME),
25604 };
25605
25606-static struct ata_port_operations pcmcia_port_ops = {
25607+static const struct ata_port_operations pcmcia_port_ops = {
25608 .inherits = &ata_sff_port_ops,
25609 .sff_data_xfer = ata_sff_data_xfer_noirq,
25610 .cable_detect = ata_cable_40wire,
25611 .set_mode = pcmcia_set_mode,
25612 };
25613
25614-static struct ata_port_operations pcmcia_8bit_port_ops = {
25615+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25616 .inherits = &ata_sff_port_ops,
25617 .sff_data_xfer = ata_data_xfer_8bit,
25618 .cable_detect = ata_cable_40wire,
25619@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25620 unsigned long io_base, ctl_base;
25621 void __iomem *io_addr, *ctl_addr;
25622 int n_ports = 1;
25623- struct ata_port_operations *ops = &pcmcia_port_ops;
25624+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25625
25626 info = kzalloc(sizeof(*info), GFP_KERNEL);
25627 if (info == NULL)
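
pata_pcmcia.c also carries the pointer side of the change: the local variable used to pick between the normal and 8-bit ops tables becomes a pointer-to-const, which is all that selecting between two const tables requires. Roughly:

	const struct ata_port_operations *ops = &pcmcia_port_ops;

	if (bus_is_8bit)		/* hypothetical condition, not the driver's real test */
		ops = &pcmcia_8bit_port_ops;
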
25628diff -urNp linux-2.6.32.46/drivers/ata/pata_pdc2027x.c linux-2.6.32.46/drivers/ata/pata_pdc2027x.c
25629--- linux-2.6.32.46/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25630+++ linux-2.6.32.46/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25631@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25632 ATA_BMDMA_SHT(DRV_NAME),
25633 };
25634
25635-static struct ata_port_operations pdc2027x_pata100_ops = {
25636+static const struct ata_port_operations pdc2027x_pata100_ops = {
25637 .inherits = &ata_bmdma_port_ops,
25638 .check_atapi_dma = pdc2027x_check_atapi_dma,
25639 .cable_detect = pdc2027x_cable_detect,
25640 .prereset = pdc2027x_prereset,
25641 };
25642
25643-static struct ata_port_operations pdc2027x_pata133_ops = {
25644+static const struct ata_port_operations pdc2027x_pata133_ops = {
25645 .inherits = &pdc2027x_pata100_ops,
25646 .mode_filter = pdc2027x_mode_filter,
25647 .set_piomode = pdc2027x_set_piomode,
25648diff -urNp linux-2.6.32.46/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.46/drivers/ata/pata_pdc202xx_old.c
25649--- linux-2.6.32.46/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25650+++ linux-2.6.32.46/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25651@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25652 ATA_BMDMA_SHT(DRV_NAME),
25653 };
25654
25655-static struct ata_port_operations pdc2024x_port_ops = {
25656+static const struct ata_port_operations pdc2024x_port_ops = {
25657 .inherits = &ata_bmdma_port_ops,
25658
25659 .cable_detect = ata_cable_40wire,
25660@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25661 .sff_exec_command = pdc202xx_exec_command,
25662 };
25663
25664-static struct ata_port_operations pdc2026x_port_ops = {
25665+static const struct ata_port_operations pdc2026x_port_ops = {
25666 .inherits = &pdc2024x_port_ops,
25667
25668 .check_atapi_dma = pdc2026x_check_atapi_dma,
25669diff -urNp linux-2.6.32.46/drivers/ata/pata_platform.c linux-2.6.32.46/drivers/ata/pata_platform.c
25670--- linux-2.6.32.46/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25671+++ linux-2.6.32.46/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25672@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25673 ATA_PIO_SHT(DRV_NAME),
25674 };
25675
25676-static struct ata_port_operations pata_platform_port_ops = {
25677+static const struct ata_port_operations pata_platform_port_ops = {
25678 .inherits = &ata_sff_port_ops,
25679 .sff_data_xfer = ata_sff_data_xfer_noirq,
25680 .cable_detect = ata_cable_unknown,
25681diff -urNp linux-2.6.32.46/drivers/ata/pata_qdi.c linux-2.6.32.46/drivers/ata/pata_qdi.c
25682--- linux-2.6.32.46/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25683+++ linux-2.6.32.46/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25684@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25685 ATA_PIO_SHT(DRV_NAME),
25686 };
25687
25688-static struct ata_port_operations qdi6500_port_ops = {
25689+static const struct ata_port_operations qdi6500_port_ops = {
25690 .inherits = &ata_sff_port_ops,
25691 .qc_issue = qdi_qc_issue,
25692 .sff_data_xfer = qdi_data_xfer,
25693@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25694 .set_piomode = qdi6500_set_piomode,
25695 };
25696
25697-static struct ata_port_operations qdi6580_port_ops = {
25698+static const struct ata_port_operations qdi6580_port_ops = {
25699 .inherits = &qdi6500_port_ops,
25700 .set_piomode = qdi6580_set_piomode,
25701 };
25702diff -urNp linux-2.6.32.46/drivers/ata/pata_radisys.c linux-2.6.32.46/drivers/ata/pata_radisys.c
25703--- linux-2.6.32.46/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25704+++ linux-2.6.32.46/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25705@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25706 ATA_BMDMA_SHT(DRV_NAME),
25707 };
25708
25709-static struct ata_port_operations radisys_pata_ops = {
25710+static const struct ata_port_operations radisys_pata_ops = {
25711 .inherits = &ata_bmdma_port_ops,
25712 .qc_issue = radisys_qc_issue,
25713 .cable_detect = ata_cable_unknown,
25714diff -urNp linux-2.6.32.46/drivers/ata/pata_rb532_cf.c linux-2.6.32.46/drivers/ata/pata_rb532_cf.c
25715--- linux-2.6.32.46/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25716+++ linux-2.6.32.46/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25717@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25718 return IRQ_HANDLED;
25719 }
25720
25721-static struct ata_port_operations rb532_pata_port_ops = {
25722+static const struct ata_port_operations rb532_pata_port_ops = {
25723 .inherits = &ata_sff_port_ops,
25724 .sff_data_xfer = ata_sff_data_xfer32,
25725 };
25726diff -urNp linux-2.6.32.46/drivers/ata/pata_rdc.c linux-2.6.32.46/drivers/ata/pata_rdc.c
25727--- linux-2.6.32.46/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25728+++ linux-2.6.32.46/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25729@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25730 pci_write_config_byte(dev, 0x48, udma_enable);
25731 }
25732
25733-static struct ata_port_operations rdc_pata_ops = {
25734+static const struct ata_port_operations rdc_pata_ops = {
25735 .inherits = &ata_bmdma32_port_ops,
25736 .cable_detect = rdc_pata_cable_detect,
25737 .set_piomode = rdc_set_piomode,
25738diff -urNp linux-2.6.32.46/drivers/ata/pata_rz1000.c linux-2.6.32.46/drivers/ata/pata_rz1000.c
25739--- linux-2.6.32.46/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25740+++ linux-2.6.32.46/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25741@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25742 ATA_PIO_SHT(DRV_NAME),
25743 };
25744
25745-static struct ata_port_operations rz1000_port_ops = {
25746+static const struct ata_port_operations rz1000_port_ops = {
25747 .inherits = &ata_sff_port_ops,
25748 .cable_detect = ata_cable_40wire,
25749 .set_mode = rz1000_set_mode,
25750diff -urNp linux-2.6.32.46/drivers/ata/pata_sc1200.c linux-2.6.32.46/drivers/ata/pata_sc1200.c
25751--- linux-2.6.32.46/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25752+++ linux-2.6.32.46/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25753@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25754 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25755 };
25756
25757-static struct ata_port_operations sc1200_port_ops = {
25758+static const struct ata_port_operations sc1200_port_ops = {
25759 .inherits = &ata_bmdma_port_ops,
25760 .qc_prep = ata_sff_dumb_qc_prep,
25761 .qc_issue = sc1200_qc_issue,
25762diff -urNp linux-2.6.32.46/drivers/ata/pata_scc.c linux-2.6.32.46/drivers/ata/pata_scc.c
25763--- linux-2.6.32.46/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25764+++ linux-2.6.32.46/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25765@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25766 ATA_BMDMA_SHT(DRV_NAME),
25767 };
25768
25769-static struct ata_port_operations scc_pata_ops = {
25770+static const struct ata_port_operations scc_pata_ops = {
25771 .inherits = &ata_bmdma_port_ops,
25772
25773 .set_piomode = scc_set_piomode,
25774diff -urNp linux-2.6.32.46/drivers/ata/pata_sch.c linux-2.6.32.46/drivers/ata/pata_sch.c
25775--- linux-2.6.32.46/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25776+++ linux-2.6.32.46/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25777@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25778 ATA_BMDMA_SHT(DRV_NAME),
25779 };
25780
25781-static struct ata_port_operations sch_pata_ops = {
25782+static const struct ata_port_operations sch_pata_ops = {
25783 .inherits = &ata_bmdma_port_ops,
25784 .cable_detect = ata_cable_unknown,
25785 .set_piomode = sch_set_piomode,
25786diff -urNp linux-2.6.32.46/drivers/ata/pata_serverworks.c linux-2.6.32.46/drivers/ata/pata_serverworks.c
25787--- linux-2.6.32.46/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25788+++ linux-2.6.32.46/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25789@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25790 ATA_BMDMA_SHT(DRV_NAME),
25791 };
25792
25793-static struct ata_port_operations serverworks_osb4_port_ops = {
25794+static const struct ata_port_operations serverworks_osb4_port_ops = {
25795 .inherits = &ata_bmdma_port_ops,
25796 .cable_detect = serverworks_cable_detect,
25797 .mode_filter = serverworks_osb4_filter,
25798@@ -307,7 +307,7 @@ static struct ata_port_operations server
25799 .set_dmamode = serverworks_set_dmamode,
25800 };
25801
25802-static struct ata_port_operations serverworks_csb_port_ops = {
25803+static const struct ata_port_operations serverworks_csb_port_ops = {
25804 .inherits = &serverworks_osb4_port_ops,
25805 .mode_filter = serverworks_csb_filter,
25806 };
25807diff -urNp linux-2.6.32.46/drivers/ata/pata_sil680.c linux-2.6.32.46/drivers/ata/pata_sil680.c
25808--- linux-2.6.32.46/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25809+++ linux-2.6.32.46/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25810@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25811 ATA_BMDMA_SHT(DRV_NAME),
25812 };
25813
25814-static struct ata_port_operations sil680_port_ops = {
25815+static const struct ata_port_operations sil680_port_ops = {
25816 .inherits = &ata_bmdma32_port_ops,
25817 .cable_detect = sil680_cable_detect,
25818 .set_piomode = sil680_set_piomode,
25819diff -urNp linux-2.6.32.46/drivers/ata/pata_sis.c linux-2.6.32.46/drivers/ata/pata_sis.c
25820--- linux-2.6.32.46/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25821+++ linux-2.6.32.46/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25822@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25823 ATA_BMDMA_SHT(DRV_NAME),
25824 };
25825
25826-static struct ata_port_operations sis_133_for_sata_ops = {
25827+static const struct ata_port_operations sis_133_for_sata_ops = {
25828 .inherits = &ata_bmdma_port_ops,
25829 .set_piomode = sis_133_set_piomode,
25830 .set_dmamode = sis_133_set_dmamode,
25831 .cable_detect = sis_133_cable_detect,
25832 };
25833
25834-static struct ata_port_operations sis_base_ops = {
25835+static const struct ata_port_operations sis_base_ops = {
25836 .inherits = &ata_bmdma_port_ops,
25837 .prereset = sis_pre_reset,
25838 };
25839
25840-static struct ata_port_operations sis_133_ops = {
25841+static const struct ata_port_operations sis_133_ops = {
25842 .inherits = &sis_base_ops,
25843 .set_piomode = sis_133_set_piomode,
25844 .set_dmamode = sis_133_set_dmamode,
25845 .cable_detect = sis_133_cable_detect,
25846 };
25847
25848-static struct ata_port_operations sis_133_early_ops = {
25849+static const struct ata_port_operations sis_133_early_ops = {
25850 .inherits = &sis_base_ops,
25851 .set_piomode = sis_100_set_piomode,
25852 .set_dmamode = sis_133_early_set_dmamode,
25853 .cable_detect = sis_66_cable_detect,
25854 };
25855
25856-static struct ata_port_operations sis_100_ops = {
25857+static const struct ata_port_operations sis_100_ops = {
25858 .inherits = &sis_base_ops,
25859 .set_piomode = sis_100_set_piomode,
25860 .set_dmamode = sis_100_set_dmamode,
25861 .cable_detect = sis_66_cable_detect,
25862 };
25863
25864-static struct ata_port_operations sis_66_ops = {
25865+static const struct ata_port_operations sis_66_ops = {
25866 .inherits = &sis_base_ops,
25867 .set_piomode = sis_old_set_piomode,
25868 .set_dmamode = sis_66_set_dmamode,
25869 .cable_detect = sis_66_cable_detect,
25870 };
25871
25872-static struct ata_port_operations sis_old_ops = {
25873+static const struct ata_port_operations sis_old_ops = {
25874 .inherits = &sis_base_ops,
25875 .set_piomode = sis_old_set_piomode,
25876 .set_dmamode = sis_old_set_dmamode,
25877diff -urNp linux-2.6.32.46/drivers/ata/pata_sl82c105.c linux-2.6.32.46/drivers/ata/pata_sl82c105.c
25878--- linux-2.6.32.46/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25879+++ linux-2.6.32.46/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25880@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25881 ATA_BMDMA_SHT(DRV_NAME),
25882 };
25883
25884-static struct ata_port_operations sl82c105_port_ops = {
25885+static const struct ata_port_operations sl82c105_port_ops = {
25886 .inherits = &ata_bmdma_port_ops,
25887 .qc_defer = sl82c105_qc_defer,
25888 .bmdma_start = sl82c105_bmdma_start,
25889diff -urNp linux-2.6.32.46/drivers/ata/pata_triflex.c linux-2.6.32.46/drivers/ata/pata_triflex.c
25890--- linux-2.6.32.46/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25891+++ linux-2.6.32.46/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25892@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25893 ATA_BMDMA_SHT(DRV_NAME),
25894 };
25895
25896-static struct ata_port_operations triflex_port_ops = {
25897+static const struct ata_port_operations triflex_port_ops = {
25898 .inherits = &ata_bmdma_port_ops,
25899 .bmdma_start = triflex_bmdma_start,
25900 .bmdma_stop = triflex_bmdma_stop,
25901diff -urNp linux-2.6.32.46/drivers/ata/pata_via.c linux-2.6.32.46/drivers/ata/pata_via.c
25902--- linux-2.6.32.46/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25903+++ linux-2.6.32.46/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25904@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25905 ATA_BMDMA_SHT(DRV_NAME),
25906 };
25907
25908-static struct ata_port_operations via_port_ops = {
25909+static const struct ata_port_operations via_port_ops = {
25910 .inherits = &ata_bmdma_port_ops,
25911 .cable_detect = via_cable_detect,
25912 .set_piomode = via_set_piomode,
25913@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25914 .port_start = via_port_start,
25915 };
25916
25917-static struct ata_port_operations via_port_ops_noirq = {
25918+static const struct ata_port_operations via_port_ops_noirq = {
25919 .inherits = &via_port_ops,
25920 .sff_data_xfer = ata_sff_data_xfer_noirq,
25921 };
25922diff -urNp linux-2.6.32.46/drivers/ata/pata_winbond.c linux-2.6.32.46/drivers/ata/pata_winbond.c
25923--- linux-2.6.32.46/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25924+++ linux-2.6.32.46/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25925@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25926 ATA_PIO_SHT(DRV_NAME),
25927 };
25928
25929-static struct ata_port_operations winbond_port_ops = {
25930+static const struct ata_port_operations winbond_port_ops = {
25931 .inherits = &ata_sff_port_ops,
25932 .sff_data_xfer = winbond_data_xfer,
25933 .cable_detect = ata_cable_40wire,
25934diff -urNp linux-2.6.32.46/drivers/ata/pdc_adma.c linux-2.6.32.46/drivers/ata/pdc_adma.c
25935--- linux-2.6.32.46/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25936+++ linux-2.6.32.46/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25937@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25938 .dma_boundary = ADMA_DMA_BOUNDARY,
25939 };
25940
25941-static struct ata_port_operations adma_ata_ops = {
25942+static const struct ata_port_operations adma_ata_ops = {
25943 .inherits = &ata_sff_port_ops,
25944
25945 .lost_interrupt = ATA_OP_NULL,
25946diff -urNp linux-2.6.32.46/drivers/ata/sata_fsl.c linux-2.6.32.46/drivers/ata/sata_fsl.c
25947--- linux-2.6.32.46/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25948+++ linux-2.6.32.46/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25949@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25950 .dma_boundary = ATA_DMA_BOUNDARY,
25951 };
25952
25953-static struct ata_port_operations sata_fsl_ops = {
25954+static const struct ata_port_operations sata_fsl_ops = {
25955 .inherits = &sata_pmp_port_ops,
25956
25957 .qc_defer = ata_std_qc_defer,
25958diff -urNp linux-2.6.32.46/drivers/ata/sata_inic162x.c linux-2.6.32.46/drivers/ata/sata_inic162x.c
25959--- linux-2.6.32.46/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25960+++ linux-2.6.32.46/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25961@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25962 return 0;
25963 }
25964
25965-static struct ata_port_operations inic_port_ops = {
25966+static const struct ata_port_operations inic_port_ops = {
25967 .inherits = &sata_port_ops,
25968
25969 .check_atapi_dma = inic_check_atapi_dma,
25970diff -urNp linux-2.6.32.46/drivers/ata/sata_mv.c linux-2.6.32.46/drivers/ata/sata_mv.c
25971--- linux-2.6.32.46/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25972+++ linux-2.6.32.46/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25973@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25974 .dma_boundary = MV_DMA_BOUNDARY,
25975 };
25976
25977-static struct ata_port_operations mv5_ops = {
25978+static const struct ata_port_operations mv5_ops = {
25979 .inherits = &ata_sff_port_ops,
25980
25981 .lost_interrupt = ATA_OP_NULL,
25982@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25983 .port_stop = mv_port_stop,
25984 };
25985
25986-static struct ata_port_operations mv6_ops = {
25987+static const struct ata_port_operations mv6_ops = {
25988 .inherits = &mv5_ops,
25989 .dev_config = mv6_dev_config,
25990 .scr_read = mv_scr_read,
25991@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25992 .bmdma_status = mv_bmdma_status,
25993 };
25994
25995-static struct ata_port_operations mv_iie_ops = {
25996+static const struct ata_port_operations mv_iie_ops = {
25997 .inherits = &mv6_ops,
25998 .dev_config = ATA_OP_NULL,
25999 .qc_prep = mv_qc_prep_iie,
26000diff -urNp linux-2.6.32.46/drivers/ata/sata_nv.c linux-2.6.32.46/drivers/ata/sata_nv.c
26001--- linux-2.6.32.46/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
26002+++ linux-2.6.32.46/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
26003@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
26004 * cases. Define nv_hardreset() which only kicks in for post-boot
26005 * probing and use it for all variants.
26006 */
26007-static struct ata_port_operations nv_generic_ops = {
26008+static const struct ata_port_operations nv_generic_ops = {
26009 .inherits = &ata_bmdma_port_ops,
26010 .lost_interrupt = ATA_OP_NULL,
26011 .scr_read = nv_scr_read,
26012@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
26013 .hardreset = nv_hardreset,
26014 };
26015
26016-static struct ata_port_operations nv_nf2_ops = {
26017+static const struct ata_port_operations nv_nf2_ops = {
26018 .inherits = &nv_generic_ops,
26019 .freeze = nv_nf2_freeze,
26020 .thaw = nv_nf2_thaw,
26021 };
26022
26023-static struct ata_port_operations nv_ck804_ops = {
26024+static const struct ata_port_operations nv_ck804_ops = {
26025 .inherits = &nv_generic_ops,
26026 .freeze = nv_ck804_freeze,
26027 .thaw = nv_ck804_thaw,
26028 .host_stop = nv_ck804_host_stop,
26029 };
26030
26031-static struct ata_port_operations nv_adma_ops = {
26032+static const struct ata_port_operations nv_adma_ops = {
26033 .inherits = &nv_ck804_ops,
26034
26035 .check_atapi_dma = nv_adma_check_atapi_dma,
26036@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
26037 .host_stop = nv_adma_host_stop,
26038 };
26039
26040-static struct ata_port_operations nv_swncq_ops = {
26041+static const struct ata_port_operations nv_swncq_ops = {
26042 .inherits = &nv_generic_ops,
26043
26044 .qc_defer = ata_std_qc_defer,
26045diff -urNp linux-2.6.32.46/drivers/ata/sata_promise.c linux-2.6.32.46/drivers/ata/sata_promise.c
26046--- linux-2.6.32.46/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
26047+++ linux-2.6.32.46/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
26048@@ -195,7 +195,7 @@ static const struct ata_port_operations
26049 .error_handler = pdc_error_handler,
26050 };
26051
26052-static struct ata_port_operations pdc_sata_ops = {
26053+static const struct ata_port_operations pdc_sata_ops = {
26054 .inherits = &pdc_common_ops,
26055 .cable_detect = pdc_sata_cable_detect,
26056 .freeze = pdc_sata_freeze,
26057@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
26058
26059 /* First-generation chips need a more restrictive ->check_atapi_dma op,
26060 and ->freeze/thaw that ignore the hotplug controls. */
26061-static struct ata_port_operations pdc_old_sata_ops = {
26062+static const struct ata_port_operations pdc_old_sata_ops = {
26063 .inherits = &pdc_sata_ops,
26064 .freeze = pdc_freeze,
26065 .thaw = pdc_thaw,
26066 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
26067 };
26068
26069-static struct ata_port_operations pdc_pata_ops = {
26070+static const struct ata_port_operations pdc_pata_ops = {
26071 .inherits = &pdc_common_ops,
26072 .cable_detect = pdc_pata_cable_detect,
26073 .freeze = pdc_freeze,
26074diff -urNp linux-2.6.32.46/drivers/ata/sata_qstor.c linux-2.6.32.46/drivers/ata/sata_qstor.c
26075--- linux-2.6.32.46/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
26076+++ linux-2.6.32.46/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
26077@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
26078 .dma_boundary = QS_DMA_BOUNDARY,
26079 };
26080
26081-static struct ata_port_operations qs_ata_ops = {
26082+static const struct ata_port_operations qs_ata_ops = {
26083 .inherits = &ata_sff_port_ops,
26084
26085 .check_atapi_dma = qs_check_atapi_dma,
26086diff -urNp linux-2.6.32.46/drivers/ata/sata_sil24.c linux-2.6.32.46/drivers/ata/sata_sil24.c
26087--- linux-2.6.32.46/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
26088+++ linux-2.6.32.46/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
26089@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
26090 .dma_boundary = ATA_DMA_BOUNDARY,
26091 };
26092
26093-static struct ata_port_operations sil24_ops = {
26094+static const struct ata_port_operations sil24_ops = {
26095 .inherits = &sata_pmp_port_ops,
26096
26097 .qc_defer = sil24_qc_defer,
26098diff -urNp linux-2.6.32.46/drivers/ata/sata_sil.c linux-2.6.32.46/drivers/ata/sata_sil.c
26099--- linux-2.6.32.46/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
26100+++ linux-2.6.32.46/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
26101@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
26102 .sg_tablesize = ATA_MAX_PRD
26103 };
26104
26105-static struct ata_port_operations sil_ops = {
26106+static const struct ata_port_operations sil_ops = {
26107 .inherits = &ata_bmdma32_port_ops,
26108 .dev_config = sil_dev_config,
26109 .set_mode = sil_set_mode,
26110diff -urNp linux-2.6.32.46/drivers/ata/sata_sis.c linux-2.6.32.46/drivers/ata/sata_sis.c
26111--- linux-2.6.32.46/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
26112+++ linux-2.6.32.46/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
26113@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
26114 ATA_BMDMA_SHT(DRV_NAME),
26115 };
26116
26117-static struct ata_port_operations sis_ops = {
26118+static const struct ata_port_operations sis_ops = {
26119 .inherits = &ata_bmdma_port_ops,
26120 .scr_read = sis_scr_read,
26121 .scr_write = sis_scr_write,
26122diff -urNp linux-2.6.32.46/drivers/ata/sata_svw.c linux-2.6.32.46/drivers/ata/sata_svw.c
26123--- linux-2.6.32.46/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
26124+++ linux-2.6.32.46/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
26125@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26126 };
26127
26128
26129-static struct ata_port_operations k2_sata_ops = {
26130+static const struct ata_port_operations k2_sata_ops = {
26131 .inherits = &ata_bmdma_port_ops,
26132 .sff_tf_load = k2_sata_tf_load,
26133 .sff_tf_read = k2_sata_tf_read,
26134diff -urNp linux-2.6.32.46/drivers/ata/sata_sx4.c linux-2.6.32.46/drivers/ata/sata_sx4.c
26135--- linux-2.6.32.46/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
26136+++ linux-2.6.32.46/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
26137@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26138 };
26139
26140 /* TODO: inherit from base port_ops after converting to new EH */
26141-static struct ata_port_operations pdc_20621_ops = {
26142+static const struct ata_port_operations pdc_20621_ops = {
26143 .inherits = &ata_sff_port_ops,
26144
26145 .check_atapi_dma = pdc_check_atapi_dma,
26146diff -urNp linux-2.6.32.46/drivers/ata/sata_uli.c linux-2.6.32.46/drivers/ata/sata_uli.c
26147--- linux-2.6.32.46/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
26148+++ linux-2.6.32.46/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
26149@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26150 ATA_BMDMA_SHT(DRV_NAME),
26151 };
26152
26153-static struct ata_port_operations uli_ops = {
26154+static const struct ata_port_operations uli_ops = {
26155 .inherits = &ata_bmdma_port_ops,
26156 .scr_read = uli_scr_read,
26157 .scr_write = uli_scr_write,
26158diff -urNp linux-2.6.32.46/drivers/ata/sata_via.c linux-2.6.32.46/drivers/ata/sata_via.c
26159--- linux-2.6.32.46/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26160+++ linux-2.6.32.46/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26161@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26162 ATA_BMDMA_SHT(DRV_NAME),
26163 };
26164
26165-static struct ata_port_operations svia_base_ops = {
26166+static const struct ata_port_operations svia_base_ops = {
26167 .inherits = &ata_bmdma_port_ops,
26168 .sff_tf_load = svia_tf_load,
26169 };
26170
26171-static struct ata_port_operations vt6420_sata_ops = {
26172+static const struct ata_port_operations vt6420_sata_ops = {
26173 .inherits = &svia_base_ops,
26174 .freeze = svia_noop_freeze,
26175 .prereset = vt6420_prereset,
26176 .bmdma_start = vt6420_bmdma_start,
26177 };
26178
26179-static struct ata_port_operations vt6421_pata_ops = {
26180+static const struct ata_port_operations vt6421_pata_ops = {
26181 .inherits = &svia_base_ops,
26182 .cable_detect = vt6421_pata_cable_detect,
26183 .set_piomode = vt6421_set_pio_mode,
26184 .set_dmamode = vt6421_set_dma_mode,
26185 };
26186
26187-static struct ata_port_operations vt6421_sata_ops = {
26188+static const struct ata_port_operations vt6421_sata_ops = {
26189 .inherits = &svia_base_ops,
26190 .scr_read = svia_scr_read,
26191 .scr_write = svia_scr_write,
26192 };
26193
26194-static struct ata_port_operations vt8251_ops = {
26195+static const struct ata_port_operations vt8251_ops = {
26196 .inherits = &svia_base_ops,
26197 .hardreset = sata_std_hardreset,
26198 .scr_read = vt8251_scr_read,
26199diff -urNp linux-2.6.32.46/drivers/ata/sata_vsc.c linux-2.6.32.46/drivers/ata/sata_vsc.c
26200--- linux-2.6.32.46/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26201+++ linux-2.6.32.46/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26202@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26203 };
26204
26205
26206-static struct ata_port_operations vsc_sata_ops = {
26207+static const struct ata_port_operations vsc_sata_ops = {
26208 .inherits = &ata_bmdma_port_ops,
26209 /* The IRQ handling is not quite standard SFF behaviour so we
26210 cannot use the default lost interrupt handler */
26211diff -urNp linux-2.6.32.46/drivers/atm/adummy.c linux-2.6.32.46/drivers/atm/adummy.c
26212--- linux-2.6.32.46/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26213+++ linux-2.6.32.46/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26214@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26215 vcc->pop(vcc, skb);
26216 else
26217 dev_kfree_skb_any(skb);
26218- atomic_inc(&vcc->stats->tx);
26219+ atomic_inc_unchecked(&vcc->stats->tx);
26220
26221 return 0;
26222 }
26223diff -urNp linux-2.6.32.46/drivers/atm/ambassador.c linux-2.6.32.46/drivers/atm/ambassador.c
26224--- linux-2.6.32.46/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26225+++ linux-2.6.32.46/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26226@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26227 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26228
26229 // VC layer stats
26230- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26231+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26232
26233 // free the descriptor
26234 kfree (tx_descr);
26235@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26236 dump_skb ("<<<", vc, skb);
26237
26238 // VC layer stats
26239- atomic_inc(&atm_vcc->stats->rx);
26240+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26241 __net_timestamp(skb);
26242 // end of our responsability
26243 atm_vcc->push (atm_vcc, skb);
26244@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26245 } else {
26246 PRINTK (KERN_INFO, "dropped over-size frame");
26247 // should we count this?
26248- atomic_inc(&atm_vcc->stats->rx_drop);
26249+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26250 }
26251
26252 } else {
26253@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26254 }
26255
26256 if (check_area (skb->data, skb->len)) {
26257- atomic_inc(&atm_vcc->stats->tx_err);
26258+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26259 return -ENOMEM; // ?
26260 }
26261
26262diff -urNp linux-2.6.32.46/drivers/atm/atmtcp.c linux-2.6.32.46/drivers/atm/atmtcp.c
26263--- linux-2.6.32.46/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26264+++ linux-2.6.32.46/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26265@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26266 if (vcc->pop) vcc->pop(vcc,skb);
26267 else dev_kfree_skb(skb);
26268 if (dev_data) return 0;
26269- atomic_inc(&vcc->stats->tx_err);
26270+ atomic_inc_unchecked(&vcc->stats->tx_err);
26271 return -ENOLINK;
26272 }
26273 size = skb->len+sizeof(struct atmtcp_hdr);
26274@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26275 if (!new_skb) {
26276 if (vcc->pop) vcc->pop(vcc,skb);
26277 else dev_kfree_skb(skb);
26278- atomic_inc(&vcc->stats->tx_err);
26279+ atomic_inc_unchecked(&vcc->stats->tx_err);
26280 return -ENOBUFS;
26281 }
26282 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26283@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26284 if (vcc->pop) vcc->pop(vcc,skb);
26285 else dev_kfree_skb(skb);
26286 out_vcc->push(out_vcc,new_skb);
26287- atomic_inc(&vcc->stats->tx);
26288- atomic_inc(&out_vcc->stats->rx);
26289+ atomic_inc_unchecked(&vcc->stats->tx);
26290+ atomic_inc_unchecked(&out_vcc->stats->rx);
26291 return 0;
26292 }
26293
26294@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26295 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26296 read_unlock(&vcc_sklist_lock);
26297 if (!out_vcc) {
26298- atomic_inc(&vcc->stats->tx_err);
26299+ atomic_inc_unchecked(&vcc->stats->tx_err);
26300 goto done;
26301 }
26302 skb_pull(skb,sizeof(struct atmtcp_hdr));
26303@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26304 __net_timestamp(new_skb);
26305 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26306 out_vcc->push(out_vcc,new_skb);
26307- atomic_inc(&vcc->stats->tx);
26308- atomic_inc(&out_vcc->stats->rx);
26309+ atomic_inc_unchecked(&vcc->stats->tx);
26310+ atomic_inc_unchecked(&out_vcc->stats->rx);
26311 done:
26312 if (vcc->pop) vcc->pop(vcc,skb);
26313 else dev_kfree_skb(skb);
26314diff -urNp linux-2.6.32.46/drivers/atm/eni.c linux-2.6.32.46/drivers/atm/eni.c
26315--- linux-2.6.32.46/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26316+++ linux-2.6.32.46/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26317@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26318 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26319 vcc->dev->number);
26320 length = 0;
26321- atomic_inc(&vcc->stats->rx_err);
26322+ atomic_inc_unchecked(&vcc->stats->rx_err);
26323 }
26324 else {
26325 length = ATM_CELL_SIZE-1; /* no HEC */
26326@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26327 size);
26328 }
26329 eff = length = 0;
26330- atomic_inc(&vcc->stats->rx_err);
26331+ atomic_inc_unchecked(&vcc->stats->rx_err);
26332 }
26333 else {
26334 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26335@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26336 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26337 vcc->dev->number,vcc->vci,length,size << 2,descr);
26338 length = eff = 0;
26339- atomic_inc(&vcc->stats->rx_err);
26340+ atomic_inc_unchecked(&vcc->stats->rx_err);
26341 }
26342 }
26343 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26344@@ -770,7 +770,7 @@ rx_dequeued++;
26345 vcc->push(vcc,skb);
26346 pushed++;
26347 }
26348- atomic_inc(&vcc->stats->rx);
26349+ atomic_inc_unchecked(&vcc->stats->rx);
26350 }
26351 wake_up(&eni_dev->rx_wait);
26352 }
26353@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26354 PCI_DMA_TODEVICE);
26355 if (vcc->pop) vcc->pop(vcc,skb);
26356 else dev_kfree_skb_irq(skb);
26357- atomic_inc(&vcc->stats->tx);
26358+ atomic_inc_unchecked(&vcc->stats->tx);
26359 wake_up(&eni_dev->tx_wait);
26360 dma_complete++;
26361 }
26362diff -urNp linux-2.6.32.46/drivers/atm/firestream.c linux-2.6.32.46/drivers/atm/firestream.c
26363--- linux-2.6.32.46/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26364+++ linux-2.6.32.46/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26365@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26366 }
26367 }
26368
26369- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26370+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26371
26372 fs_dprintk (FS_DEBUG_TXMEM, "i");
26373 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26374@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26375 #endif
26376 skb_put (skb, qe->p1 & 0xffff);
26377 ATM_SKB(skb)->vcc = atm_vcc;
26378- atomic_inc(&atm_vcc->stats->rx);
26379+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26380 __net_timestamp(skb);
26381 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26382 atm_vcc->push (atm_vcc, skb);
26383@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26384 kfree (pe);
26385 }
26386 if (atm_vcc)
26387- atomic_inc(&atm_vcc->stats->rx_drop);
26388+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26389 break;
26390 case 0x1f: /* Reassembly abort: no buffers. */
26391 /* Silently increment error counter. */
26392 if (atm_vcc)
26393- atomic_inc(&atm_vcc->stats->rx_drop);
26394+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26395 break;
26396 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26397 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26398diff -urNp linux-2.6.32.46/drivers/atm/fore200e.c linux-2.6.32.46/drivers/atm/fore200e.c
26399--- linux-2.6.32.46/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26400+++ linux-2.6.32.46/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26401@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26402 #endif
26403 /* check error condition */
26404 if (*entry->status & STATUS_ERROR)
26405- atomic_inc(&vcc->stats->tx_err);
26406+ atomic_inc_unchecked(&vcc->stats->tx_err);
26407 else
26408- atomic_inc(&vcc->stats->tx);
26409+ atomic_inc_unchecked(&vcc->stats->tx);
26410 }
26411 }
26412
26413@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26414 if (skb == NULL) {
26415 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26416
26417- atomic_inc(&vcc->stats->rx_drop);
26418+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26419 return -ENOMEM;
26420 }
26421
26422@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26423
26424 dev_kfree_skb_any(skb);
26425
26426- atomic_inc(&vcc->stats->rx_drop);
26427+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26428 return -ENOMEM;
26429 }
26430
26431 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26432
26433 vcc->push(vcc, skb);
26434- atomic_inc(&vcc->stats->rx);
26435+ atomic_inc_unchecked(&vcc->stats->rx);
26436
26437 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26438
26439@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26440 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26441 fore200e->atm_dev->number,
26442 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26443- atomic_inc(&vcc->stats->rx_err);
26444+ atomic_inc_unchecked(&vcc->stats->rx_err);
26445 }
26446 }
26447
26448@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26449 goto retry_here;
26450 }
26451
26452- atomic_inc(&vcc->stats->tx_err);
26453+ atomic_inc_unchecked(&vcc->stats->tx_err);
26454
26455 fore200e->tx_sat++;
26456 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26457diff -urNp linux-2.6.32.46/drivers/atm/he.c linux-2.6.32.46/drivers/atm/he.c
26458--- linux-2.6.32.46/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26459+++ linux-2.6.32.46/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26460@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26461
26462 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26463 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26464- atomic_inc(&vcc->stats->rx_drop);
26465+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26466 goto return_host_buffers;
26467 }
26468
26469@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26470 RBRQ_LEN_ERR(he_dev->rbrq_head)
26471 ? "LEN_ERR" : "",
26472 vcc->vpi, vcc->vci);
26473- atomic_inc(&vcc->stats->rx_err);
26474+ atomic_inc_unchecked(&vcc->stats->rx_err);
26475 goto return_host_buffers;
26476 }
26477
26478@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26479 vcc->push(vcc, skb);
26480 spin_lock(&he_dev->global_lock);
26481
26482- atomic_inc(&vcc->stats->rx);
26483+ atomic_inc_unchecked(&vcc->stats->rx);
26484
26485 return_host_buffers:
26486 ++pdus_assembled;
26487@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26488 tpd->vcc->pop(tpd->vcc, tpd->skb);
26489 else
26490 dev_kfree_skb_any(tpd->skb);
26491- atomic_inc(&tpd->vcc->stats->tx_err);
26492+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26493 }
26494 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26495 return;
26496@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26497 vcc->pop(vcc, skb);
26498 else
26499 dev_kfree_skb_any(skb);
26500- atomic_inc(&vcc->stats->tx_err);
26501+ atomic_inc_unchecked(&vcc->stats->tx_err);
26502 return -EINVAL;
26503 }
26504
26505@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26506 vcc->pop(vcc, skb);
26507 else
26508 dev_kfree_skb_any(skb);
26509- atomic_inc(&vcc->stats->tx_err);
26510+ atomic_inc_unchecked(&vcc->stats->tx_err);
26511 return -EINVAL;
26512 }
26513 #endif
26514@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26515 vcc->pop(vcc, skb);
26516 else
26517 dev_kfree_skb_any(skb);
26518- atomic_inc(&vcc->stats->tx_err);
26519+ atomic_inc_unchecked(&vcc->stats->tx_err);
26520 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26521 return -ENOMEM;
26522 }
26523@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26524 vcc->pop(vcc, skb);
26525 else
26526 dev_kfree_skb_any(skb);
26527- atomic_inc(&vcc->stats->tx_err);
26528+ atomic_inc_unchecked(&vcc->stats->tx_err);
26529 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26530 return -ENOMEM;
26531 }
26532@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26533 __enqueue_tpd(he_dev, tpd, cid);
26534 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26535
26536- atomic_inc(&vcc->stats->tx);
26537+ atomic_inc_unchecked(&vcc->stats->tx);
26538
26539 return 0;
26540 }
26541diff -urNp linux-2.6.32.46/drivers/atm/horizon.c linux-2.6.32.46/drivers/atm/horizon.c
26542--- linux-2.6.32.46/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26543+++ linux-2.6.32.46/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26544@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26545 {
26546 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26547 // VC layer stats
26548- atomic_inc(&vcc->stats->rx);
26549+ atomic_inc_unchecked(&vcc->stats->rx);
26550 __net_timestamp(skb);
26551 // end of our responsability
26552 vcc->push (vcc, skb);
26553@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26554 dev->tx_iovec = NULL;
26555
26556 // VC layer stats
26557- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26558+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26559
26560 // free the skb
26561 hrz_kfree_skb (skb);
26562diff -urNp linux-2.6.32.46/drivers/atm/idt77252.c linux-2.6.32.46/drivers/atm/idt77252.c
26563--- linux-2.6.32.46/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26564+++ linux-2.6.32.46/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26565@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26566 else
26567 dev_kfree_skb(skb);
26568
26569- atomic_inc(&vcc->stats->tx);
26570+ atomic_inc_unchecked(&vcc->stats->tx);
26571 }
26572
26573 atomic_dec(&scq->used);
26574@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26575 if ((sb = dev_alloc_skb(64)) == NULL) {
26576 printk("%s: Can't allocate buffers for aal0.\n",
26577 card->name);
26578- atomic_add(i, &vcc->stats->rx_drop);
26579+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26580 break;
26581 }
26582 if (!atm_charge(vcc, sb->truesize)) {
26583 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26584 card->name);
26585- atomic_add(i - 1, &vcc->stats->rx_drop);
26586+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26587 dev_kfree_skb(sb);
26588 break;
26589 }
26590@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26591 ATM_SKB(sb)->vcc = vcc;
26592 __net_timestamp(sb);
26593 vcc->push(vcc, sb);
26594- atomic_inc(&vcc->stats->rx);
26595+ atomic_inc_unchecked(&vcc->stats->rx);
26596
26597 cell += ATM_CELL_PAYLOAD;
26598 }
26599@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26600 "(CDC: %08x)\n",
26601 card->name, len, rpp->len, readl(SAR_REG_CDC));
26602 recycle_rx_pool_skb(card, rpp);
26603- atomic_inc(&vcc->stats->rx_err);
26604+ atomic_inc_unchecked(&vcc->stats->rx_err);
26605 return;
26606 }
26607 if (stat & SAR_RSQE_CRC) {
26608 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26609 recycle_rx_pool_skb(card, rpp);
26610- atomic_inc(&vcc->stats->rx_err);
26611+ atomic_inc_unchecked(&vcc->stats->rx_err);
26612 return;
26613 }
26614 if (skb_queue_len(&rpp->queue) > 1) {
26615@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26616 RXPRINTK("%s: Can't alloc RX skb.\n",
26617 card->name);
26618 recycle_rx_pool_skb(card, rpp);
26619- atomic_inc(&vcc->stats->rx_err);
26620+ atomic_inc_unchecked(&vcc->stats->rx_err);
26621 return;
26622 }
26623 if (!atm_charge(vcc, skb->truesize)) {
26624@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26625 __net_timestamp(skb);
26626
26627 vcc->push(vcc, skb);
26628- atomic_inc(&vcc->stats->rx);
26629+ atomic_inc_unchecked(&vcc->stats->rx);
26630
26631 return;
26632 }
26633@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26634 __net_timestamp(skb);
26635
26636 vcc->push(vcc, skb);
26637- atomic_inc(&vcc->stats->rx);
26638+ atomic_inc_unchecked(&vcc->stats->rx);
26639
26640 if (skb->truesize > SAR_FB_SIZE_3)
26641 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26642@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26643 if (vcc->qos.aal != ATM_AAL0) {
26644 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26645 card->name, vpi, vci);
26646- atomic_inc(&vcc->stats->rx_drop);
26647+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26648 goto drop;
26649 }
26650
26651 if ((sb = dev_alloc_skb(64)) == NULL) {
26652 printk("%s: Can't allocate buffers for AAL0.\n",
26653 card->name);
26654- atomic_inc(&vcc->stats->rx_err);
26655+ atomic_inc_unchecked(&vcc->stats->rx_err);
26656 goto drop;
26657 }
26658
26659@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26660 ATM_SKB(sb)->vcc = vcc;
26661 __net_timestamp(sb);
26662 vcc->push(vcc, sb);
26663- atomic_inc(&vcc->stats->rx);
26664+ atomic_inc_unchecked(&vcc->stats->rx);
26665
26666 drop:
26667 skb_pull(queue, 64);
26668@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26669
26670 if (vc == NULL) {
26671 printk("%s: NULL connection in send().\n", card->name);
26672- atomic_inc(&vcc->stats->tx_err);
26673+ atomic_inc_unchecked(&vcc->stats->tx_err);
26674 dev_kfree_skb(skb);
26675 return -EINVAL;
26676 }
26677 if (!test_bit(VCF_TX, &vc->flags)) {
26678 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26679- atomic_inc(&vcc->stats->tx_err);
26680+ atomic_inc_unchecked(&vcc->stats->tx_err);
26681 dev_kfree_skb(skb);
26682 return -EINVAL;
26683 }
26684@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26685 break;
26686 default:
26687 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26688- atomic_inc(&vcc->stats->tx_err);
26689+ atomic_inc_unchecked(&vcc->stats->tx_err);
26690 dev_kfree_skb(skb);
26691 return -EINVAL;
26692 }
26693
26694 if (skb_shinfo(skb)->nr_frags != 0) {
26695 printk("%s: No scatter-gather yet.\n", card->name);
26696- atomic_inc(&vcc->stats->tx_err);
26697+ atomic_inc_unchecked(&vcc->stats->tx_err);
26698 dev_kfree_skb(skb);
26699 return -EINVAL;
26700 }
26701@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26702
26703 err = queue_skb(card, vc, skb, oam);
26704 if (err) {
26705- atomic_inc(&vcc->stats->tx_err);
26706+ atomic_inc_unchecked(&vcc->stats->tx_err);
26707 dev_kfree_skb(skb);
26708 return err;
26709 }
26710@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26711 skb = dev_alloc_skb(64);
26712 if (!skb) {
26713 printk("%s: Out of memory in send_oam().\n", card->name);
26714- atomic_inc(&vcc->stats->tx_err);
26715+ atomic_inc_unchecked(&vcc->stats->tx_err);
26716 return -ENOMEM;
26717 }
26718 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26719diff -urNp linux-2.6.32.46/drivers/atm/iphase.c linux-2.6.32.46/drivers/atm/iphase.c
26720--- linux-2.6.32.46/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26721+++ linux-2.6.32.46/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26722@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26723 status = (u_short) (buf_desc_ptr->desc_mode);
26724 if (status & (RX_CER | RX_PTE | RX_OFL))
26725 {
26726- atomic_inc(&vcc->stats->rx_err);
26727+ atomic_inc_unchecked(&vcc->stats->rx_err);
26728 IF_ERR(printk("IA: bad packet, dropping it");)
26729 if (status & RX_CER) {
26730 IF_ERR(printk(" cause: packet CRC error\n");)
26731@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26732 len = dma_addr - buf_addr;
26733 if (len > iadev->rx_buf_sz) {
26734 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26735- atomic_inc(&vcc->stats->rx_err);
26736+ atomic_inc_unchecked(&vcc->stats->rx_err);
26737 goto out_free_desc;
26738 }
26739
26740@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26741 ia_vcc = INPH_IA_VCC(vcc);
26742 if (ia_vcc == NULL)
26743 {
26744- atomic_inc(&vcc->stats->rx_err);
26745+ atomic_inc_unchecked(&vcc->stats->rx_err);
26746 dev_kfree_skb_any(skb);
26747 atm_return(vcc, atm_guess_pdu2truesize(len));
26748 goto INCR_DLE;
26749@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26750 if ((length > iadev->rx_buf_sz) || (length >
26751 (skb->len - sizeof(struct cpcs_trailer))))
26752 {
26753- atomic_inc(&vcc->stats->rx_err);
26754+ atomic_inc_unchecked(&vcc->stats->rx_err);
26755 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26756 length, skb->len);)
26757 dev_kfree_skb_any(skb);
26758@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26759
26760 IF_RX(printk("rx_dle_intr: skb push");)
26761 vcc->push(vcc,skb);
26762- atomic_inc(&vcc->stats->rx);
26763+ atomic_inc_unchecked(&vcc->stats->rx);
26764 iadev->rx_pkt_cnt++;
26765 }
26766 INCR_DLE:
26767@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26768 {
26769 struct k_sonet_stats *stats;
26770 stats = &PRIV(_ia_dev[board])->sonet_stats;
26771- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26772- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26773- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26774- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26775- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26776- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26777- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26778- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26779- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26780+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26781+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26782+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26783+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26784+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26785+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26786+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26787+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26788+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26789 }
26790 ia_cmds.status = 0;
26791 break;
26792@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26793 if ((desc == 0) || (desc > iadev->num_tx_desc))
26794 {
26795 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26796- atomic_inc(&vcc->stats->tx);
26797+ atomic_inc_unchecked(&vcc->stats->tx);
26798 if (vcc->pop)
26799 vcc->pop(vcc, skb);
26800 else
26801@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26802 ATM_DESC(skb) = vcc->vci;
26803 skb_queue_tail(&iadev->tx_dma_q, skb);
26804
26805- atomic_inc(&vcc->stats->tx);
26806+ atomic_inc_unchecked(&vcc->stats->tx);
26807 iadev->tx_pkt_cnt++;
26808 /* Increment transaction counter */
26809 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26810
26811 #if 0
26812 /* add flow control logic */
26813- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26814+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26815 if (iavcc->vc_desc_cnt > 10) {
26816 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26817 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26818diff -urNp linux-2.6.32.46/drivers/atm/lanai.c linux-2.6.32.46/drivers/atm/lanai.c
26819--- linux-2.6.32.46/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26820+++ linux-2.6.32.46/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26821@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26822 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26823 lanai_endtx(lanai, lvcc);
26824 lanai_free_skb(lvcc->tx.atmvcc, skb);
26825- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26826+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26827 }
26828
26829 /* Try to fill the buffer - don't call unless there is backlog */
26830@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26831 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26832 __net_timestamp(skb);
26833 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26834- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26835+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26836 out:
26837 lvcc->rx.buf.ptr = end;
26838 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26839@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26840 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26841 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26842 lanai->stats.service_rxnotaal5++;
26843- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26844+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26845 return 0;
26846 }
26847 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26848@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26849 int bytes;
26850 read_unlock(&vcc_sklist_lock);
26851 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26852- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26853+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26854 lvcc->stats.x.aal5.service_trash++;
26855 bytes = (SERVICE_GET_END(s) * 16) -
26856 (((unsigned long) lvcc->rx.buf.ptr) -
26857@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26858 }
26859 if (s & SERVICE_STREAM) {
26860 read_unlock(&vcc_sklist_lock);
26861- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26862+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26863 lvcc->stats.x.aal5.service_stream++;
26864 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26865 "PDU on VCI %d!\n", lanai->number, vci);
26866@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26867 return 0;
26868 }
26869 DPRINTK("got rx crc error on vci %d\n", vci);
26870- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26871+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26872 lvcc->stats.x.aal5.service_rxcrc++;
26873 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26874 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26875diff -urNp linux-2.6.32.46/drivers/atm/nicstar.c linux-2.6.32.46/drivers/atm/nicstar.c
26876--- linux-2.6.32.46/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26877+++ linux-2.6.32.46/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26878@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26879 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26880 {
26881 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26882- atomic_inc(&vcc->stats->tx_err);
26883+ atomic_inc_unchecked(&vcc->stats->tx_err);
26884 dev_kfree_skb_any(skb);
26885 return -EINVAL;
26886 }
26887@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26888 if (!vc->tx)
26889 {
26890 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26891- atomic_inc(&vcc->stats->tx_err);
26892+ atomic_inc_unchecked(&vcc->stats->tx_err);
26893 dev_kfree_skb_any(skb);
26894 return -EINVAL;
26895 }
26896@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26897 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26898 {
26899 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26900- atomic_inc(&vcc->stats->tx_err);
26901+ atomic_inc_unchecked(&vcc->stats->tx_err);
26902 dev_kfree_skb_any(skb);
26903 return -EINVAL;
26904 }
26905@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26906 if (skb_shinfo(skb)->nr_frags != 0)
26907 {
26908 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26909- atomic_inc(&vcc->stats->tx_err);
26910+ atomic_inc_unchecked(&vcc->stats->tx_err);
26911 dev_kfree_skb_any(skb);
26912 return -EINVAL;
26913 }
26914@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26915
26916 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26917 {
26918- atomic_inc(&vcc->stats->tx_err);
26919+ atomic_inc_unchecked(&vcc->stats->tx_err);
26920 dev_kfree_skb_any(skb);
26921 return -EIO;
26922 }
26923- atomic_inc(&vcc->stats->tx);
26924+ atomic_inc_unchecked(&vcc->stats->tx);
26925
26926 return 0;
26927 }
26928@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26929 {
26930 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26931 card->index);
26932- atomic_add(i,&vcc->stats->rx_drop);
26933+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26934 break;
26935 }
26936 if (!atm_charge(vcc, sb->truesize))
26937 {
26938 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26939 card->index);
26940- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26941+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26942 dev_kfree_skb_any(sb);
26943 break;
26944 }
26945@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26946 ATM_SKB(sb)->vcc = vcc;
26947 __net_timestamp(sb);
26948 vcc->push(vcc, sb);
26949- atomic_inc(&vcc->stats->rx);
26950+ atomic_inc_unchecked(&vcc->stats->rx);
26951 cell += ATM_CELL_PAYLOAD;
26952 }
26953
26954@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26955 if (iovb == NULL)
26956 {
26957 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26958- atomic_inc(&vcc->stats->rx_drop);
26959+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26960 recycle_rx_buf(card, skb);
26961 return;
26962 }
26963@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26964 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26965 {
26966 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26967- atomic_inc(&vcc->stats->rx_err);
26968+ atomic_inc_unchecked(&vcc->stats->rx_err);
26969 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26970 NS_SKB(iovb)->iovcnt = 0;
26971 iovb->len = 0;
26972@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26973 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26974 card->index);
26975 which_list(card, skb);
26976- atomic_inc(&vcc->stats->rx_err);
26977+ atomic_inc_unchecked(&vcc->stats->rx_err);
26978 recycle_rx_buf(card, skb);
26979 vc->rx_iov = NULL;
26980 recycle_iov_buf(card, iovb);
26981@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26982 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26983 card->index);
26984 which_list(card, skb);
26985- atomic_inc(&vcc->stats->rx_err);
26986+ atomic_inc_unchecked(&vcc->stats->rx_err);
26987 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26988 NS_SKB(iovb)->iovcnt);
26989 vc->rx_iov = NULL;
26990@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26991 printk(" - PDU size mismatch.\n");
26992 else
26993 printk(".\n");
26994- atomic_inc(&vcc->stats->rx_err);
26995+ atomic_inc_unchecked(&vcc->stats->rx_err);
26996 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26997 NS_SKB(iovb)->iovcnt);
26998 vc->rx_iov = NULL;
26999@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
27000 if (!atm_charge(vcc, skb->truesize))
27001 {
27002 push_rxbufs(card, skb);
27003- atomic_inc(&vcc->stats->rx_drop);
27004+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27005 }
27006 else
27007 {
27008@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
27009 ATM_SKB(skb)->vcc = vcc;
27010 __net_timestamp(skb);
27011 vcc->push(vcc, skb);
27012- atomic_inc(&vcc->stats->rx);
27013+ atomic_inc_unchecked(&vcc->stats->rx);
27014 }
27015 }
27016 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
27017@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
27018 if (!atm_charge(vcc, sb->truesize))
27019 {
27020 push_rxbufs(card, sb);
27021- atomic_inc(&vcc->stats->rx_drop);
27022+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27023 }
27024 else
27025 {
27026@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
27027 ATM_SKB(sb)->vcc = vcc;
27028 __net_timestamp(sb);
27029 vcc->push(vcc, sb);
27030- atomic_inc(&vcc->stats->rx);
27031+ atomic_inc_unchecked(&vcc->stats->rx);
27032 }
27033
27034 push_rxbufs(card, skb);
27035@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
27036 if (!atm_charge(vcc, skb->truesize))
27037 {
27038 push_rxbufs(card, skb);
27039- atomic_inc(&vcc->stats->rx_drop);
27040+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27041 }
27042 else
27043 {
27044@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
27045 ATM_SKB(skb)->vcc = vcc;
27046 __net_timestamp(skb);
27047 vcc->push(vcc, skb);
27048- atomic_inc(&vcc->stats->rx);
27049+ atomic_inc_unchecked(&vcc->stats->rx);
27050 }
27051
27052 push_rxbufs(card, sb);
27053@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
27054 if (hb == NULL)
27055 {
27056 printk("nicstar%d: Out of huge buffers.\n", card->index);
27057- atomic_inc(&vcc->stats->rx_drop);
27058+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27059 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
27060 NS_SKB(iovb)->iovcnt);
27061 vc->rx_iov = NULL;
27062@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
27063 }
27064 else
27065 dev_kfree_skb_any(hb);
27066- atomic_inc(&vcc->stats->rx_drop);
27067+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27068 }
27069 else
27070 {
27071@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
27072 #endif /* NS_USE_DESTRUCTORS */
27073 __net_timestamp(hb);
27074 vcc->push(vcc, hb);
27075- atomic_inc(&vcc->stats->rx);
27076+ atomic_inc_unchecked(&vcc->stats->rx);
27077 }
27078 }
27079
27080diff -urNp linux-2.6.32.46/drivers/atm/solos-pci.c linux-2.6.32.46/drivers/atm/solos-pci.c
27081--- linux-2.6.32.46/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
27082+++ linux-2.6.32.46/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
27083@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
27084 }
27085 atm_charge(vcc, skb->truesize);
27086 vcc->push(vcc, skb);
27087- atomic_inc(&vcc->stats->rx);
27088+ atomic_inc_unchecked(&vcc->stats->rx);
27089 break;
27090
27091 case PKT_STATUS:
27092@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
27093 char msg[500];
27094 char item[10];
27095
27096+ pax_track_stack();
27097+
27098 len = buf->len;
27099 for (i = 0; i < len; i++){
27100 if(i % 8 == 0)
27101@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
27102 vcc = SKB_CB(oldskb)->vcc;
27103
27104 if (vcc) {
27105- atomic_inc(&vcc->stats->tx);
27106+ atomic_inc_unchecked(&vcc->stats->tx);
27107 solos_pop(vcc, oldskb);
27108 } else
27109 dev_kfree_skb_irq(oldskb);
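Note: the pax_track_stack() call added here (and later in cpqarray, DAC960, nbd, ipmi and istallion) sits at the top of functions with unusually large on-stack buffers; as far as I can tell it feeds PaX's stack-depth tracking so the used portion of the kernel stack can be sanitized before returning to userspace. A hypothetical example of the shape such a function takes:

    #include <linux/kernel.h>

    static void example_dump(const unsigned char *data, int len)
    {
            char msg[500];                  /* large on-stack buffer */

            pax_track_stack();              /* record stack depth for later clearing */
            snprintf(msg, sizeof(msg), "dumping %d bytes at %p", len, data);
            printk(KERN_DEBUG "%s\n", msg);
    }
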
27110diff -urNp linux-2.6.32.46/drivers/atm/suni.c linux-2.6.32.46/drivers/atm/suni.c
27111--- linux-2.6.32.46/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
27112+++ linux-2.6.32.46/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
27113@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27114
27115
27116 #define ADD_LIMITED(s,v) \
27117- atomic_add((v),&stats->s); \
27118- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27119+ atomic_add_unchecked((v),&stats->s); \
27120+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27121
27122
27123 static void suni_hz(unsigned long from_timer)
27124diff -urNp linux-2.6.32.46/drivers/atm/uPD98402.c linux-2.6.32.46/drivers/atm/uPD98402.c
27125--- linux-2.6.32.46/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
27126+++ linux-2.6.32.46/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
27127@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27128 struct sonet_stats tmp;
27129 int error = 0;
27130
27131- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27132+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27133 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27134 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27135 if (zero && !error) {
27136@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27137
27138
27139 #define ADD_LIMITED(s,v) \
27140- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27141- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27142- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27143+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27144+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27145+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27146
27147
27148 static void stat_event(struct atm_dev *dev)
27149@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27150 if (reason & uPD98402_INT_PFM) stat_event(dev);
27151 if (reason & uPD98402_INT_PCO) {
27152 (void) GET(PCOCR); /* clear interrupt cause */
27153- atomic_add(GET(HECCT),
27154+ atomic_add_unchecked(GET(HECCT),
27155 &PRIV(dev)->sonet_stats.uncorr_hcs);
27156 }
27157 if ((reason & uPD98402_INT_RFO) &&
27158@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27159 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27160 uPD98402_INT_LOS),PIMR); /* enable them */
27161 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27162- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27163- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27164- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27165+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27166+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27167+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27168 return 0;
27169 }
27170
27171diff -urNp linux-2.6.32.46/drivers/atm/zatm.c linux-2.6.32.46/drivers/atm/zatm.c
27172--- linux-2.6.32.46/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27173+++ linux-2.6.32.46/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27174@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27175 }
27176 if (!size) {
27177 dev_kfree_skb_irq(skb);
27178- if (vcc) atomic_inc(&vcc->stats->rx_err);
27179+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27180 continue;
27181 }
27182 if (!atm_charge(vcc,skb->truesize)) {
27183@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27184 skb->len = size;
27185 ATM_SKB(skb)->vcc = vcc;
27186 vcc->push(vcc,skb);
27187- atomic_inc(&vcc->stats->rx);
27188+ atomic_inc_unchecked(&vcc->stats->rx);
27189 }
27190 zout(pos & 0xffff,MTA(mbx));
27191 #if 0 /* probably a stupid idea */
27192@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27193 skb_queue_head(&zatm_vcc->backlog,skb);
27194 break;
27195 }
27196- atomic_inc(&vcc->stats->tx);
27197+ atomic_inc_unchecked(&vcc->stats->tx);
27198 wake_up(&zatm_vcc->tx_wait);
27199 }
27200
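Note: the drivers/base, pktcdvd and hvc changes that follow are plain constification: sysfs_ops, kset_uevent_ops and hv_ops tables hold nothing but function pointers fixed at build time, so marking them const lets them live in read-only memory. An illustrative (hypothetical) table in the resulting style:

    #include <linux/sysfs.h>
    #include <linux/kernel.h>

    static ssize_t example_show(struct kobject *kobj, struct attribute *attr, char *buf)
    {
            return scnprintf(buf, PAGE_SIZE, "example\n");
    }

    static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
                                 const char *buf, size_t count)
    {
            return count;
    }

    static const struct sysfs_ops example_sysfs_ops = {
            .show  = example_show,
            .store = example_store,
    };
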
27201diff -urNp linux-2.6.32.46/drivers/base/bus.c linux-2.6.32.46/drivers/base/bus.c
27202--- linux-2.6.32.46/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27203+++ linux-2.6.32.46/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27204@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27205 return ret;
27206 }
27207
27208-static struct sysfs_ops driver_sysfs_ops = {
27209+static const struct sysfs_ops driver_sysfs_ops = {
27210 .show = drv_attr_show,
27211 .store = drv_attr_store,
27212 };
27213@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27214 return ret;
27215 }
27216
27217-static struct sysfs_ops bus_sysfs_ops = {
27218+static const struct sysfs_ops bus_sysfs_ops = {
27219 .show = bus_attr_show,
27220 .store = bus_attr_store,
27221 };
27222@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27223 return 0;
27224 }
27225
27226-static struct kset_uevent_ops bus_uevent_ops = {
27227+static const struct kset_uevent_ops bus_uevent_ops = {
27228 .filter = bus_uevent_filter,
27229 };
27230
27231diff -urNp linux-2.6.32.46/drivers/base/class.c linux-2.6.32.46/drivers/base/class.c
27232--- linux-2.6.32.46/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27233+++ linux-2.6.32.46/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27234@@ -63,7 +63,7 @@ static void class_release(struct kobject
27235 kfree(cp);
27236 }
27237
27238-static struct sysfs_ops class_sysfs_ops = {
27239+static const struct sysfs_ops class_sysfs_ops = {
27240 .show = class_attr_show,
27241 .store = class_attr_store,
27242 };
27243diff -urNp linux-2.6.32.46/drivers/base/core.c linux-2.6.32.46/drivers/base/core.c
27244--- linux-2.6.32.46/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27245+++ linux-2.6.32.46/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27246@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27247 return ret;
27248 }
27249
27250-static struct sysfs_ops dev_sysfs_ops = {
27251+static const struct sysfs_ops dev_sysfs_ops = {
27252 .show = dev_attr_show,
27253 .store = dev_attr_store,
27254 };
27255@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27256 return retval;
27257 }
27258
27259-static struct kset_uevent_ops device_uevent_ops = {
27260+static const struct kset_uevent_ops device_uevent_ops = {
27261 .filter = dev_uevent_filter,
27262 .name = dev_uevent_name,
27263 .uevent = dev_uevent,
27264diff -urNp linux-2.6.32.46/drivers/base/memory.c linux-2.6.32.46/drivers/base/memory.c
27265--- linux-2.6.32.46/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27266+++ linux-2.6.32.46/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27267@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27268 return retval;
27269 }
27270
27271-static struct kset_uevent_ops memory_uevent_ops = {
27272+static const struct kset_uevent_ops memory_uevent_ops = {
27273 .name = memory_uevent_name,
27274 .uevent = memory_uevent,
27275 };
27276diff -urNp linux-2.6.32.46/drivers/base/sys.c linux-2.6.32.46/drivers/base/sys.c
27277--- linux-2.6.32.46/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27278+++ linux-2.6.32.46/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27279@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27280 return -EIO;
27281 }
27282
27283-static struct sysfs_ops sysfs_ops = {
27284+static const struct sysfs_ops sysfs_ops = {
27285 .show = sysdev_show,
27286 .store = sysdev_store,
27287 };
27288@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27289 return -EIO;
27290 }
27291
27292-static struct sysfs_ops sysfs_class_ops = {
27293+static const struct sysfs_ops sysfs_class_ops = {
27294 .show = sysdev_class_show,
27295 .store = sysdev_class_store,
27296 };
27297diff -urNp linux-2.6.32.46/drivers/block/cciss.c linux-2.6.32.46/drivers/block/cciss.c
27298--- linux-2.6.32.46/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27299+++ linux-2.6.32.46/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27300@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27301 int err;
27302 u32 cp;
27303
27304+ memset(&arg64, 0, sizeof(arg64));
27305+
27306 err = 0;
27307 err |=
27308 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27309@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27310 /* Wait (up to 20 seconds) for a command to complete */
27311
27312 for (i = 20 * HZ; i > 0; i--) {
27313- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27314+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27315 if (done == FIFO_EMPTY)
27316 schedule_timeout_uninterruptible(1);
27317 else
27318@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27319 resend_cmd1:
27320
27321 /* Disable interrupt on the board. */
27322- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27323+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27324
27325 /* Make sure there is room in the command FIFO */
27326 /* Actually it should be completely empty at this time */
27327@@ -2884,13 +2886,13 @@ resend_cmd1:
27328 /* tape side of the driver. */
27329 for (i = 200000; i > 0; i--) {
27330 /* if fifo isn't full go */
27331- if (!(h->access.fifo_full(h)))
27332+ if (!(h->access->fifo_full(h)))
27333 break;
27334 udelay(10);
27335 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27336 " waiting!\n", h->ctlr);
27337 }
27338- h->access.submit_command(h, c); /* Send the cmd */
27339+ h->access->submit_command(h, c); /* Send the cmd */
27340 do {
27341 complete = pollcomplete(h->ctlr);
27342
27343@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27344 while (!hlist_empty(&h->reqQ)) {
27345 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27346 /* can't do anything if fifo is full */
27347- if ((h->access.fifo_full(h))) {
27348+ if ((h->access->fifo_full(h))) {
27349 printk(KERN_WARNING "cciss: fifo full\n");
27350 break;
27351 }
27352@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27353 h->Qdepth--;
27354
27355 /* Tell the controller execute command */
27356- h->access.submit_command(h, c);
27357+ h->access->submit_command(h, c);
27358
27359 /* Put job onto the completed Q */
27360 addQ(&h->cmpQ, c);
27361@@ -3393,17 +3395,17 @@ startio:
27362
27363 static inline unsigned long get_next_completion(ctlr_info_t *h)
27364 {
27365- return h->access.command_completed(h);
27366+ return h->access->command_completed(h);
27367 }
27368
27369 static inline int interrupt_pending(ctlr_info_t *h)
27370 {
27371- return h->access.intr_pending(h);
27372+ return h->access->intr_pending(h);
27373 }
27374
27375 static inline long interrupt_not_for_us(ctlr_info_t *h)
27376 {
27377- return (((h->access.intr_pending(h) == 0) ||
27378+ return (((h->access->intr_pending(h) == 0) ||
27379 (h->interrupts_enabled == 0)));
27380 }
27381
27382@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27383 */
27384 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27385 c->product_name = products[prod_index].product_name;
27386- c->access = *(products[prod_index].access);
27387+ c->access = products[prod_index].access;
27388 c->nr_cmds = c->max_commands - 4;
27389 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27390 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27391@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27392 }
27393
27394 /* make sure the board interrupts are off */
27395- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27396+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27397 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27398 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27399 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27400@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27401 cciss_scsi_setup(i);
27402
27403 /* Turn the interrupts on so we can service requests */
27404- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27405+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27406
27407 /* Get the firmware version */
27408 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27409diff -urNp linux-2.6.32.46/drivers/block/cciss.h linux-2.6.32.46/drivers/block/cciss.h
27410--- linux-2.6.32.46/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27411+++ linux-2.6.32.46/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27412@@ -90,7 +90,7 @@ struct ctlr_info
27413 // information about each logical volume
27414 drive_info_struct *drv[CISS_MAX_LUN];
27415
27416- struct access_method access;
27417+ struct access_method *access;
27418
27419 /* queue and queue Info */
27420 struct hlist_head reqQ;
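Note: in cciss above (and cpqarray below) the controller structure stops embedding a writable copy of the access_method table and keeps a pointer to the constant table instead, so the method table itself can stay read-only; every call site therefore changes from h->access.fn(h) to h->access->fn(h). A self-contained sketch of the two shapes, with hypothetical names:

    struct example_ctlr;

    struct example_access_method {
            void (*submit_command)(struct example_ctlr *h);
    };

    struct example_ctlr {
            /* was: struct example_access_method access;  (embedded, writable copy) */
            const struct example_access_method *access;  /* now: pointer to const table */
    };

    static void example_send(struct example_ctlr *h)
    {
            h->access->submit_command(h);   /* was: h->access.submit_command(h) */
    }
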
27421diff -urNp linux-2.6.32.46/drivers/block/cpqarray.c linux-2.6.32.46/drivers/block/cpqarray.c
27422--- linux-2.6.32.46/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27423+++ linux-2.6.32.46/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27424@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27425 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27426 goto Enomem4;
27427 }
27428- hba[i]->access.set_intr_mask(hba[i], 0);
27429+ hba[i]->access->set_intr_mask(hba[i], 0);
27430 if (request_irq(hba[i]->intr, do_ida_intr,
27431 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27432 {
27433@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27434 add_timer(&hba[i]->timer);
27435
27436 /* Enable IRQ now that spinlock and rate limit timer are set up */
27437- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27438+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27439
27440 for(j=0; j<NWD; j++) {
27441 struct gendisk *disk = ida_gendisk[i][j];
27442@@ -695,7 +695,7 @@ DBGINFO(
27443 for(i=0; i<NR_PRODUCTS; i++) {
27444 if (board_id == products[i].board_id) {
27445 c->product_name = products[i].product_name;
27446- c->access = *(products[i].access);
27447+ c->access = products[i].access;
27448 break;
27449 }
27450 }
27451@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27452 hba[ctlr]->intr = intr;
27453 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27454 hba[ctlr]->product_name = products[j].product_name;
27455- hba[ctlr]->access = *(products[j].access);
27456+ hba[ctlr]->access = products[j].access;
27457 hba[ctlr]->ctlr = ctlr;
27458 hba[ctlr]->board_id = board_id;
27459 hba[ctlr]->pci_dev = NULL; /* not PCI */
27460@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27461 struct scatterlist tmp_sg[SG_MAX];
27462 int i, dir, seg;
27463
27464+ pax_track_stack();
27465+
27466 if (blk_queue_plugged(q))
27467 goto startio;
27468
27469@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27470
27471 while((c = h->reqQ) != NULL) {
27472 /* Can't do anything if we're busy */
27473- if (h->access.fifo_full(h) == 0)
27474+ if (h->access->fifo_full(h) == 0)
27475 return;
27476
27477 /* Get the first entry from the request Q */
27478@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27479 h->Qdepth--;
27480
27481 /* Tell the controller to do our bidding */
27482- h->access.submit_command(h, c);
27483+ h->access->submit_command(h, c);
27484
27485 /* Get onto the completion Q */
27486 addQ(&h->cmpQ, c);
27487@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27488 unsigned long flags;
27489 __u32 a,a1;
27490
27491- istat = h->access.intr_pending(h);
27492+ istat = h->access->intr_pending(h);
27493 /* Is this interrupt for us? */
27494 if (istat == 0)
27495 return IRQ_NONE;
27496@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27497 */
27498 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27499 if (istat & FIFO_NOT_EMPTY) {
27500- while((a = h->access.command_completed(h))) {
27501+ while((a = h->access->command_completed(h))) {
27502 a1 = a; a &= ~3;
27503 if ((c = h->cmpQ) == NULL)
27504 {
27505@@ -1434,11 +1436,11 @@ static int sendcmd(
27506 /*
27507 * Disable interrupt
27508 */
27509- info_p->access.set_intr_mask(info_p, 0);
27510+ info_p->access->set_intr_mask(info_p, 0);
27511 /* Make sure there is room in the command FIFO */
27512 /* Actually it should be completely empty at this time. */
27513 for (i = 200000; i > 0; i--) {
27514- temp = info_p->access.fifo_full(info_p);
27515+ temp = info_p->access->fifo_full(info_p);
27516 if (temp != 0) {
27517 break;
27518 }
27519@@ -1451,7 +1453,7 @@ DBG(
27520 /*
27521 * Send the cmd
27522 */
27523- info_p->access.submit_command(info_p, c);
27524+ info_p->access->submit_command(info_p, c);
27525 complete = pollcomplete(ctlr);
27526
27527 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27528@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27529 * we check the new geometry. Then turn interrupts back on when
27530 * we're done.
27531 */
27532- host->access.set_intr_mask(host, 0);
27533+ host->access->set_intr_mask(host, 0);
27534 getgeometry(ctlr);
27535- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27536+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27537
27538 for(i=0; i<NWD; i++) {
27539 struct gendisk *disk = ida_gendisk[ctlr][i];
27540@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27541 /* Wait (up to 2 seconds) for a command to complete */
27542
27543 for (i = 200000; i > 0; i--) {
27544- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27545+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27546 if (done == 0) {
27547 udelay(10); /* a short fixed delay */
27548 } else
27549diff -urNp linux-2.6.32.46/drivers/block/cpqarray.h linux-2.6.32.46/drivers/block/cpqarray.h
27550--- linux-2.6.32.46/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27551+++ linux-2.6.32.46/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27552@@ -99,7 +99,7 @@ struct ctlr_info {
27553 drv_info_t drv[NWD];
27554 struct proc_dir_entry *proc;
27555
27556- struct access_method access;
27557+ struct access_method *access;
27558
27559 cmdlist_t *reqQ;
27560 cmdlist_t *cmpQ;
27561diff -urNp linux-2.6.32.46/drivers/block/DAC960.c linux-2.6.32.46/drivers/block/DAC960.c
27562--- linux-2.6.32.46/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27563+++ linux-2.6.32.46/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27564@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27565 unsigned long flags;
27566 int Channel, TargetID;
27567
27568+ pax_track_stack();
27569+
27570 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27571 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27572 sizeof(DAC960_SCSI_Inquiry_T) +
27573diff -urNp linux-2.6.32.46/drivers/block/nbd.c linux-2.6.32.46/drivers/block/nbd.c
27574--- linux-2.6.32.46/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27575+++ linux-2.6.32.46/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27576@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27577 struct kvec iov;
27578 sigset_t blocked, oldset;
27579
27580+ pax_track_stack();
27581+
27582 if (unlikely(!sock)) {
27583 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27584 lo->disk->disk_name, (send ? "send" : "recv"));
27585@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27586 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27587 unsigned int cmd, unsigned long arg)
27588 {
27589+ pax_track_stack();
27590+
27591 switch (cmd) {
27592 case NBD_DISCONNECT: {
27593 struct request sreq;
27594diff -urNp linux-2.6.32.46/drivers/block/pktcdvd.c linux-2.6.32.46/drivers/block/pktcdvd.c
27595--- linux-2.6.32.46/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27596+++ linux-2.6.32.46/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27597@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27598 return len;
27599 }
27600
27601-static struct sysfs_ops kobj_pkt_ops = {
27602+static const struct sysfs_ops kobj_pkt_ops = {
27603 .show = kobj_pkt_show,
27604 .store = kobj_pkt_store
27605 };
27606diff -urNp linux-2.6.32.46/drivers/char/agp/frontend.c linux-2.6.32.46/drivers/char/agp/frontend.c
27607--- linux-2.6.32.46/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27608+++ linux-2.6.32.46/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27609@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27610 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27611 return -EFAULT;
27612
27613- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27614+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27615 return -EFAULT;
27616
27617 client = agp_find_client_by_pid(reserve.pid);
27618diff -urNp linux-2.6.32.46/drivers/char/briq_panel.c linux-2.6.32.46/drivers/char/briq_panel.c
27619--- linux-2.6.32.46/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27620+++ linux-2.6.32.46/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27621@@ -10,6 +10,7 @@
27622 #include <linux/types.h>
27623 #include <linux/errno.h>
27624 #include <linux/tty.h>
27625+#include <linux/mutex.h>
27626 #include <linux/timer.h>
27627 #include <linux/kernel.h>
27628 #include <linux/wait.h>
27629@@ -36,6 +37,7 @@ static int vfd_is_open;
27630 static unsigned char vfd[40];
27631 static int vfd_cursor;
27632 static unsigned char ledpb, led;
27633+static DEFINE_MUTEX(vfd_mutex);
27634
27635 static void update_vfd(void)
27636 {
27637@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27638 if (!vfd_is_open)
27639 return -EBUSY;
27640
27641+ mutex_lock(&vfd_mutex);
27642 for (;;) {
27643 char c;
27644 if (!indx)
27645 break;
27646- if (get_user(c, buf))
27647+ if (get_user(c, buf)) {
27648+ mutex_unlock(&vfd_mutex);
27649 return -EFAULT;
27650+ }
27651 if (esc) {
27652 set_led(c);
27653 esc = 0;
27654@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27655 buf++;
27656 }
27657 update_vfd();
27658+ mutex_unlock(&vfd_mutex);
27659
27660 return len;
27661 }
27662diff -urNp linux-2.6.32.46/drivers/char/genrtc.c linux-2.6.32.46/drivers/char/genrtc.c
27663--- linux-2.6.32.46/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27664+++ linux-2.6.32.46/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27665@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27666 switch (cmd) {
27667
27668 case RTC_PLL_GET:
27669+ memset(&pll, 0, sizeof(pll));
27670 if (get_rtc_pll(&pll))
27671 return -EINVAL;
27672 else
27673diff -urNp linux-2.6.32.46/drivers/char/hpet.c linux-2.6.32.46/drivers/char/hpet.c
27674--- linux-2.6.32.46/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27675+++ linux-2.6.32.46/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27676@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27677 return 0;
27678 }
27679
27680-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27681+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27682
27683 static int
27684 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27685@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27686 }
27687
27688 static int
27689-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27690+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27691 {
27692 struct hpet_timer __iomem *timer;
27693 struct hpet __iomem *hpet;
27694@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27695 {
27696 struct hpet_info info;
27697
27698+ memset(&info, 0, sizeof(info));
27699+
27700 if (devp->hd_ireqfreq)
27701 info.hi_ireqfreq =
27702 hpet_time_div(hpetp, devp->hd_ireqfreq);
27703- else
27704- info.hi_ireqfreq = 0;
27705 info.hi_flags =
27706 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27707 info.hi_hpet = hpetp->hp_which;
27708diff -urNp linux-2.6.32.46/drivers/char/hvc_beat.c linux-2.6.32.46/drivers/char/hvc_beat.c
27709--- linux-2.6.32.46/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27710+++ linux-2.6.32.46/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27711@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27712 return cnt;
27713 }
27714
27715-static struct hv_ops hvc_beat_get_put_ops = {
27716+static const struct hv_ops hvc_beat_get_put_ops = {
27717 .get_chars = hvc_beat_get_chars,
27718 .put_chars = hvc_beat_put_chars,
27719 };
27720diff -urNp linux-2.6.32.46/drivers/char/hvc_console.c linux-2.6.32.46/drivers/char/hvc_console.c
27721--- linux-2.6.32.46/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27722+++ linux-2.6.32.46/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27723@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27724 * console interfaces but can still be used as a tty device. This has to be
27725 * static because kmalloc will not work during early console init.
27726 */
27727-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27728+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27729 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27730 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27731
27732@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27733 * vty adapters do NOT get an hvc_instantiate() callback since they
27734 * appear after early console init.
27735 */
27736-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27737+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27738 {
27739 struct hvc_struct *hp;
27740
27741@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27742 };
27743
27744 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27745- struct hv_ops *ops, int outbuf_size)
27746+ const struct hv_ops *ops, int outbuf_size)
27747 {
27748 struct hvc_struct *hp;
27749 int i;
27750diff -urNp linux-2.6.32.46/drivers/char/hvc_console.h linux-2.6.32.46/drivers/char/hvc_console.h
27751--- linux-2.6.32.46/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27752+++ linux-2.6.32.46/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27753@@ -55,7 +55,7 @@ struct hvc_struct {
27754 int outbuf_size;
27755 int n_outbuf;
27756 uint32_t vtermno;
27757- struct hv_ops *ops;
27758+ const struct hv_ops *ops;
27759 int irq_requested;
27760 int data;
27761 struct winsize ws;
27762@@ -76,11 +76,11 @@ struct hv_ops {
27763 };
27764
27765 /* Register a vterm and a slot index for use as a console (console_init) */
27766-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27767+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27768
27769 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27770 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27771- struct hv_ops *ops, int outbuf_size);
27772+ const struct hv_ops *ops, int outbuf_size);
27773 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27774 extern int hvc_remove(struct hvc_struct *hp);
27775
27776diff -urNp linux-2.6.32.46/drivers/char/hvc_iseries.c linux-2.6.32.46/drivers/char/hvc_iseries.c
27777--- linux-2.6.32.46/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27778+++ linux-2.6.32.46/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27779@@ -197,7 +197,7 @@ done:
27780 return sent;
27781 }
27782
27783-static struct hv_ops hvc_get_put_ops = {
27784+static const struct hv_ops hvc_get_put_ops = {
27785 .get_chars = get_chars,
27786 .put_chars = put_chars,
27787 .notifier_add = notifier_add_irq,
27788diff -urNp linux-2.6.32.46/drivers/char/hvc_iucv.c linux-2.6.32.46/drivers/char/hvc_iucv.c
27789--- linux-2.6.32.46/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27790+++ linux-2.6.32.46/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27791@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27792
27793
27794 /* HVC operations */
27795-static struct hv_ops hvc_iucv_ops = {
27796+static const struct hv_ops hvc_iucv_ops = {
27797 .get_chars = hvc_iucv_get_chars,
27798 .put_chars = hvc_iucv_put_chars,
27799 .notifier_add = hvc_iucv_notifier_add,
27800diff -urNp linux-2.6.32.46/drivers/char/hvc_rtas.c linux-2.6.32.46/drivers/char/hvc_rtas.c
27801--- linux-2.6.32.46/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27802+++ linux-2.6.32.46/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27803@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27804 return i;
27805 }
27806
27807-static struct hv_ops hvc_rtas_get_put_ops = {
27808+static const struct hv_ops hvc_rtas_get_put_ops = {
27809 .get_chars = hvc_rtas_read_console,
27810 .put_chars = hvc_rtas_write_console,
27811 };
27812diff -urNp linux-2.6.32.46/drivers/char/hvcs.c linux-2.6.32.46/drivers/char/hvcs.c
27813--- linux-2.6.32.46/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27814+++ linux-2.6.32.46/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27815@@ -82,6 +82,7 @@
27816 #include <asm/hvcserver.h>
27817 #include <asm/uaccess.h>
27818 #include <asm/vio.h>
27819+#include <asm/local.h>
27820
27821 /*
27822 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27823@@ -269,7 +270,7 @@ struct hvcs_struct {
27824 unsigned int index;
27825
27826 struct tty_struct *tty;
27827- int open_count;
27828+ local_t open_count;
27829
27830 /*
27831 * Used to tell the driver kernel_thread what operations need to take
27832@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27833
27834 spin_lock_irqsave(&hvcsd->lock, flags);
27835
27836- if (hvcsd->open_count > 0) {
27837+ if (local_read(&hvcsd->open_count) > 0) {
27838 spin_unlock_irqrestore(&hvcsd->lock, flags);
27839 printk(KERN_INFO "HVCS: vterm state unchanged. "
27840 "The hvcs device node is still in use.\n");
27841@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27842 if ((retval = hvcs_partner_connect(hvcsd)))
27843 goto error_release;
27844
27845- hvcsd->open_count = 1;
27846+ local_set(&hvcsd->open_count, 1);
27847 hvcsd->tty = tty;
27848 tty->driver_data = hvcsd;
27849
27850@@ -1169,7 +1170,7 @@ fast_open:
27851
27852 spin_lock_irqsave(&hvcsd->lock, flags);
27853 kref_get(&hvcsd->kref);
27854- hvcsd->open_count++;
27855+ local_inc(&hvcsd->open_count);
27856 hvcsd->todo_mask |= HVCS_SCHED_READ;
27857 spin_unlock_irqrestore(&hvcsd->lock, flags);
27858
27859@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27860 hvcsd = tty->driver_data;
27861
27862 spin_lock_irqsave(&hvcsd->lock, flags);
27863- if (--hvcsd->open_count == 0) {
27864+ if (local_dec_and_test(&hvcsd->open_count)) {
27865
27866 vio_disable_interrupts(hvcsd->vdev);
27867
27868@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27869 free_irq(irq, hvcsd);
27870 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27871 return;
27872- } else if (hvcsd->open_count < 0) {
27873+ } else if (local_read(&hvcsd->open_count) < 0) {
27874 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27875 " is missmanaged.\n",
27876- hvcsd->vdev->unit_address, hvcsd->open_count);
27877+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27878 }
27879
27880 spin_unlock_irqrestore(&hvcsd->lock, flags);
27881@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27882
27883 spin_lock_irqsave(&hvcsd->lock, flags);
27884 /* Preserve this so that we know how many kref refs to put */
27885- temp_open_count = hvcsd->open_count;
27886+ temp_open_count = local_read(&hvcsd->open_count);
27887
27888 /*
27889 * Don't kref put inside the spinlock because the destruction
27890@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27891 hvcsd->tty->driver_data = NULL;
27892 hvcsd->tty = NULL;
27893
27894- hvcsd->open_count = 0;
27895+ local_set(&hvcsd->open_count, 0);
27896
27897 /* This will drop any buffered data on the floor which is OK in a hangup
27898 * scenario. */
27899@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27900 * the middle of a write operation? This is a crummy place to do this
27901 * but we want to keep it all in the spinlock.
27902 */
27903- if (hvcsd->open_count <= 0) {
27904+ if (local_read(&hvcsd->open_count) <= 0) {
27905 spin_unlock_irqrestore(&hvcsd->lock, flags);
27906 return -ENODEV;
27907 }
27908@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27909 {
27910 struct hvcs_struct *hvcsd = tty->driver_data;
27911
27912- if (!hvcsd || hvcsd->open_count <= 0)
27913+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27914 return 0;
27915
27916 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
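Note: hvcs above (and the ipwireless tty driver further down) converts its int open_count into a local_t driven by local_inc()/local_dec_and_test()/local_read(); the counter keeps well-defined update semantics without being drawn into the overflow-checked atomic_t class. A minimal sketch of the pattern, assuming <asm/local.h> semantics:

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    static void example_open(void)
    {
            local_inc(&open_count);                 /* was: open_count++ */
    }

    static int example_close(void)
    {
            return local_dec_and_test(&open_count); /* was: --open_count == 0 */
    }
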
27917diff -urNp linux-2.6.32.46/drivers/char/hvc_udbg.c linux-2.6.32.46/drivers/char/hvc_udbg.c
27918--- linux-2.6.32.46/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27919+++ linux-2.6.32.46/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27920@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27921 return i;
27922 }
27923
27924-static struct hv_ops hvc_udbg_ops = {
27925+static const struct hv_ops hvc_udbg_ops = {
27926 .get_chars = hvc_udbg_get,
27927 .put_chars = hvc_udbg_put,
27928 };
27929diff -urNp linux-2.6.32.46/drivers/char/hvc_vio.c linux-2.6.32.46/drivers/char/hvc_vio.c
27930--- linux-2.6.32.46/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27931+++ linux-2.6.32.46/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27932@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27933 return got;
27934 }
27935
27936-static struct hv_ops hvc_get_put_ops = {
27937+static const struct hv_ops hvc_get_put_ops = {
27938 .get_chars = filtered_get_chars,
27939 .put_chars = hvc_put_chars,
27940 .notifier_add = notifier_add_irq,
27941diff -urNp linux-2.6.32.46/drivers/char/hvc_xen.c linux-2.6.32.46/drivers/char/hvc_xen.c
27942--- linux-2.6.32.46/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27943+++ linux-2.6.32.46/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27944@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27945 return recv;
27946 }
27947
27948-static struct hv_ops hvc_ops = {
27949+static const struct hv_ops hvc_ops = {
27950 .get_chars = read_console,
27951 .put_chars = write_console,
27952 .notifier_add = notifier_add_irq,
27953diff -urNp linux-2.6.32.46/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.46/drivers/char/ipmi/ipmi_msghandler.c
27954--- linux-2.6.32.46/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27955+++ linux-2.6.32.46/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27956@@ -414,7 +414,7 @@ struct ipmi_smi {
27957 struct proc_dir_entry *proc_dir;
27958 char proc_dir_name[10];
27959
27960- atomic_t stats[IPMI_NUM_STATS];
27961+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27962
27963 /*
27964 * run_to_completion duplicate of smb_info, smi_info
27965@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27966
27967
27968 #define ipmi_inc_stat(intf, stat) \
27969- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27970+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27971 #define ipmi_get_stat(intf, stat) \
27972- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27973+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27974
27975 static int is_lan_addr(struct ipmi_addr *addr)
27976 {
27977@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27978 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27979 init_waitqueue_head(&intf->waitq);
27980 for (i = 0; i < IPMI_NUM_STATS; i++)
27981- atomic_set(&intf->stats[i], 0);
27982+ atomic_set_unchecked(&intf->stats[i], 0);
27983
27984 intf->proc_dir = NULL;
27985
27986@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27987 struct ipmi_smi_msg smi_msg;
27988 struct ipmi_recv_msg recv_msg;
27989
27990+ pax_track_stack();
27991+
27992 si = (struct ipmi_system_interface_addr *) &addr;
27993 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27994 si->channel = IPMI_BMC_CHANNEL;
27995diff -urNp linux-2.6.32.46/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.46/drivers/char/ipmi/ipmi_si_intf.c
27996--- linux-2.6.32.46/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27997+++ linux-2.6.32.46/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27998@@ -277,7 +277,7 @@ struct smi_info {
27999 unsigned char slave_addr;
28000
28001 /* Counters and things for the proc filesystem. */
28002- atomic_t stats[SI_NUM_STATS];
28003+ atomic_unchecked_t stats[SI_NUM_STATS];
28004
28005 struct task_struct *thread;
28006
28007@@ -285,9 +285,9 @@ struct smi_info {
28008 };
28009
28010 #define smi_inc_stat(smi, stat) \
28011- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28012+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28013 #define smi_get_stat(smi, stat) \
28014- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28015+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28016
28017 #define SI_MAX_PARMS 4
28018
28019@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
28020 atomic_set(&new_smi->req_events, 0);
28021 new_smi->run_to_completion = 0;
28022 for (i = 0; i < SI_NUM_STATS; i++)
28023- atomic_set(&new_smi->stats[i], 0);
28024+ atomic_set_unchecked(&new_smi->stats[i], 0);
28025
28026 new_smi->interrupt_disabled = 0;
28027 atomic_set(&new_smi->stop_operation, 0);
28028diff -urNp linux-2.6.32.46/drivers/char/istallion.c linux-2.6.32.46/drivers/char/istallion.c
28029--- linux-2.6.32.46/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
28030+++ linux-2.6.32.46/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
28031@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
28032 * re-used for each stats call.
28033 */
28034 static comstats_t stli_comstats;
28035-static combrd_t stli_brdstats;
28036 static struct asystats stli_cdkstats;
28037
28038 /*****************************************************************************/
28039@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
28040 {
28041 struct stlibrd *brdp;
28042 unsigned int i;
28043+ combrd_t stli_brdstats;
28044
28045 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
28046 return -EFAULT;
28047@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
28048 struct stliport stli_dummyport;
28049 struct stliport *portp;
28050
28051+ pax_track_stack();
28052+
28053 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
28054 return -EFAULT;
28055 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
28056@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
28057 struct stlibrd stli_dummybrd;
28058 struct stlibrd *brdp;
28059
28060+ pax_track_stack();
28061+
28062 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
28063 return -EFAULT;
28064 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
28065diff -urNp linux-2.6.32.46/drivers/char/Kconfig linux-2.6.32.46/drivers/char/Kconfig
28066--- linux-2.6.32.46/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
28067+++ linux-2.6.32.46/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
28068@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
28069
28070 config DEVKMEM
28071 bool "/dev/kmem virtual device support"
28072- default y
28073+ default n
28074+ depends on !GRKERNSEC_KMEM
28075 help
28076 Say Y here if you want to support the /dev/kmem device. The
28077 /dev/kmem device is rarely used, but can be used for certain
28078@@ -1114,6 +1115,7 @@ config DEVPORT
28079 bool
28080 depends on !M68K
28081 depends on ISA || PCI
28082+ depends on !GRKERNSEC_KMEM
28083 default y
28084
28085 source "drivers/s390/char/Kconfig"
28086diff -urNp linux-2.6.32.46/drivers/char/keyboard.c linux-2.6.32.46/drivers/char/keyboard.c
28087--- linux-2.6.32.46/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
28088+++ linux-2.6.32.46/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
28089@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
28090 kbd->kbdmode == VC_MEDIUMRAW) &&
28091 value != KVAL(K_SAK))
28092 return; /* SAK is allowed even in raw mode */
28093+
28094+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
28095+ {
28096+ void *func = fn_handler[value];
28097+ if (func == fn_show_state || func == fn_show_ptregs ||
28098+ func == fn_show_mem)
28099+ return;
28100+ }
28101+#endif
28102+
28103 fn_handler[value](vc);
28104 }
28105
28106@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
28107 .evbit = { BIT_MASK(EV_SND) },
28108 },
28109
28110- { }, /* Terminating entry */
28111+ { 0 }, /* Terminating entry */
28112 };
28113
28114 MODULE_DEVICE_TABLE(input, kbd_ids);
28115diff -urNp linux-2.6.32.46/drivers/char/mem.c linux-2.6.32.46/drivers/char/mem.c
28116--- linux-2.6.32.46/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
28117+++ linux-2.6.32.46/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
28118@@ -18,6 +18,7 @@
28119 #include <linux/raw.h>
28120 #include <linux/tty.h>
28121 #include <linux/capability.h>
28122+#include <linux/security.h>
28123 #include <linux/ptrace.h>
28124 #include <linux/device.h>
28125 #include <linux/highmem.h>
28126@@ -35,6 +36,10 @@
28127 # include <linux/efi.h>
28128 #endif
28129
28130+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28131+extern struct file_operations grsec_fops;
28132+#endif
28133+
28134 static inline unsigned long size_inside_page(unsigned long start,
28135 unsigned long size)
28136 {
28137@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
28138
28139 while (cursor < to) {
28140 if (!devmem_is_allowed(pfn)) {
28141+#ifdef CONFIG_GRKERNSEC_KMEM
28142+ gr_handle_mem_readwrite(from, to);
28143+#else
28144 printk(KERN_INFO
28145 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28146 current->comm, from, to);
28147+#endif
28148 return 0;
28149 }
28150 cursor += PAGE_SIZE;
28151@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28152 }
28153 return 1;
28154 }
28155+#elif defined(CONFIG_GRKERNSEC_KMEM)
28156+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28157+{
28158+ return 0;
28159+}
28160 #else
28161 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28162 {
28163@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28164 #endif
28165
28166 while (count > 0) {
28167+ char *temp;
28168+
28169 /*
28170 * Handle first page in case it's not aligned
28171 */
28172@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28173 if (!ptr)
28174 return -EFAULT;
28175
28176- if (copy_to_user(buf, ptr, sz)) {
28177+#ifdef CONFIG_PAX_USERCOPY
28178+ temp = kmalloc(sz, GFP_KERNEL);
28179+ if (!temp) {
28180+ unxlate_dev_mem_ptr(p, ptr);
28181+ return -ENOMEM;
28182+ }
28183+ memcpy(temp, ptr, sz);
28184+#else
28185+ temp = ptr;
28186+#endif
28187+
28188+ if (copy_to_user(buf, temp, sz)) {
28189+
28190+#ifdef CONFIG_PAX_USERCOPY
28191+ kfree(temp);
28192+#endif
28193+
28194 unxlate_dev_mem_ptr(p, ptr);
28195 return -EFAULT;
28196 }
28197
28198+#ifdef CONFIG_PAX_USERCOPY
28199+ kfree(temp);
28200+#endif
28201+
28202 unxlate_dev_mem_ptr(p, ptr);
28203
28204 buf += sz;
28205@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28206 size_t count, loff_t *ppos)
28207 {
28208 unsigned long p = *ppos;
28209- ssize_t low_count, read, sz;
28210+ ssize_t low_count, read, sz, err = 0;
28211 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28212- int err = 0;
28213
28214 read = 0;
28215 if (p < (unsigned long) high_memory) {
28216@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28217 }
28218 #endif
28219 while (low_count > 0) {
28220+ char *temp;
28221+
28222 sz = size_inside_page(p, low_count);
28223
28224 /*
28225@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28226 */
28227 kbuf = xlate_dev_kmem_ptr((char *)p);
28228
28229- if (copy_to_user(buf, kbuf, sz))
28230+#ifdef CONFIG_PAX_USERCOPY
28231+ temp = kmalloc(sz, GFP_KERNEL);
28232+ if (!temp)
28233+ return -ENOMEM;
28234+ memcpy(temp, kbuf, sz);
28235+#else
28236+ temp = kbuf;
28237+#endif
28238+
28239+ err = copy_to_user(buf, temp, sz);
28240+
28241+#ifdef CONFIG_PAX_USERCOPY
28242+ kfree(temp);
28243+#endif
28244+
28245+ if (err)
28246 return -EFAULT;
28247 buf += sz;
28248 p += sz;
28249@@ -889,6 +941,9 @@ static const struct memdev {
28250 #ifdef CONFIG_CRASH_DUMP
28251 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28252 #endif
28253+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28254+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28255+#endif
28256 };
28257
28258 static int memory_open(struct inode *inode, struct file *filp)
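Note: the read_mem()/read_kmem() hunks above bounce the data through a freshly kmalloc()'d buffer before copy_to_user() whenever CONFIG_PAX_USERCOPY is set, so the usercopy checker sees a heap object of known size rather than an arbitrary ioremap or lowmem pointer. A self-contained sketch of that bounce-buffer idea (the helper name is invented here):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static ssize_t bounce_to_user(void __user *dst, const void *src, size_t sz)
    {
            char *tmp = kmalloc(sz, GFP_KERNEL);
            ssize_t ret = 0;

            if (!tmp)
                    return -ENOMEM;
            memcpy(tmp, src, sz);             /* copy into a sized, trackable heap object */
            if (copy_to_user(dst, tmp, sz))   /* bounds-checked under PAX_USERCOPY */
                    ret = -EFAULT;
            kfree(tmp);
            return ret;
    }
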
28259diff -urNp linux-2.6.32.46/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.46/drivers/char/pcmcia/ipwireless/tty.c
28260--- linux-2.6.32.46/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28261+++ linux-2.6.32.46/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28262@@ -29,6 +29,7 @@
28263 #include <linux/tty_driver.h>
28264 #include <linux/tty_flip.h>
28265 #include <linux/uaccess.h>
28266+#include <asm/local.h>
28267
28268 #include "tty.h"
28269 #include "network.h"
28270@@ -51,7 +52,7 @@ struct ipw_tty {
28271 int tty_type;
28272 struct ipw_network *network;
28273 struct tty_struct *linux_tty;
28274- int open_count;
28275+ local_t open_count;
28276 unsigned int control_lines;
28277 struct mutex ipw_tty_mutex;
28278 int tx_bytes_queued;
28279@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28280 mutex_unlock(&tty->ipw_tty_mutex);
28281 return -ENODEV;
28282 }
28283- if (tty->open_count == 0)
28284+ if (local_read(&tty->open_count) == 0)
28285 tty->tx_bytes_queued = 0;
28286
28287- tty->open_count++;
28288+ local_inc(&tty->open_count);
28289
28290 tty->linux_tty = linux_tty;
28291 linux_tty->driver_data = tty;
28292@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28293
28294 static void do_ipw_close(struct ipw_tty *tty)
28295 {
28296- tty->open_count--;
28297-
28298- if (tty->open_count == 0) {
28299+ if (local_dec_return(&tty->open_count) == 0) {
28300 struct tty_struct *linux_tty = tty->linux_tty;
28301
28302 if (linux_tty != NULL) {
28303@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28304 return;
28305
28306 mutex_lock(&tty->ipw_tty_mutex);
28307- if (tty->open_count == 0) {
28308+ if (local_read(&tty->open_count) == 0) {
28309 mutex_unlock(&tty->ipw_tty_mutex);
28310 return;
28311 }
28312@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28313 return;
28314 }
28315
28316- if (!tty->open_count) {
28317+ if (!local_read(&tty->open_count)) {
28318 mutex_unlock(&tty->ipw_tty_mutex);
28319 return;
28320 }
28321@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28322 return -ENODEV;
28323
28324 mutex_lock(&tty->ipw_tty_mutex);
28325- if (!tty->open_count) {
28326+ if (!local_read(&tty->open_count)) {
28327 mutex_unlock(&tty->ipw_tty_mutex);
28328 return -EINVAL;
28329 }
28330@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28331 if (!tty)
28332 return -ENODEV;
28333
28334- if (!tty->open_count)
28335+ if (!local_read(&tty->open_count))
28336 return -EINVAL;
28337
28338 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28339@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28340 if (!tty)
28341 return 0;
28342
28343- if (!tty->open_count)
28344+ if (!local_read(&tty->open_count))
28345 return 0;
28346
28347 return tty->tx_bytes_queued;
28348@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28349 if (!tty)
28350 return -ENODEV;
28351
28352- if (!tty->open_count)
28353+ if (!local_read(&tty->open_count))
28354 return -EINVAL;
28355
28356 return get_control_lines(tty);
28357@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28358 if (!tty)
28359 return -ENODEV;
28360
28361- if (!tty->open_count)
28362+ if (!local_read(&tty->open_count))
28363 return -EINVAL;
28364
28365 return set_control_lines(tty, set, clear);
28366@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28367 if (!tty)
28368 return -ENODEV;
28369
28370- if (!tty->open_count)
28371+ if (!local_read(&tty->open_count))
28372 return -EINVAL;
28373
28374 /* FIXME: Exactly how is the tty object locked here .. */
28375@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28376 against a parallel ioctl etc */
28377 mutex_lock(&ttyj->ipw_tty_mutex);
28378 }
28379- while (ttyj->open_count)
28380+ while (local_read(&ttyj->open_count))
28381 do_ipw_close(ttyj);
28382 ipwireless_disassociate_network_ttys(network,
28383 ttyj->channel_idx);
28384diff -urNp linux-2.6.32.46/drivers/char/pty.c linux-2.6.32.46/drivers/char/pty.c
28385--- linux-2.6.32.46/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28386+++ linux-2.6.32.46/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28387@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28388 register_sysctl_table(pty_root_table);
28389
28390 /* Now create the /dev/ptmx special device */
28391+ pax_open_kernel();
28392 tty_default_fops(&ptmx_fops);
28393- ptmx_fops.open = ptmx_open;
28394+ *(void **)&ptmx_fops.open = ptmx_open;
28395+ pax_close_kernel();
28396
28397 cdev_init(&ptmx_cdev, &ptmx_fops);
28398 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28399diff -urNp linux-2.6.32.46/drivers/char/random.c linux-2.6.32.46/drivers/char/random.c
28400--- linux-2.6.32.46/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28401+++ linux-2.6.32.46/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28402@@ -254,8 +254,13 @@
28403 /*
28404 * Configuration information
28405 */
28406+#ifdef CONFIG_GRKERNSEC_RANDNET
28407+#define INPUT_POOL_WORDS 512
28408+#define OUTPUT_POOL_WORDS 128
28409+#else
28410 #define INPUT_POOL_WORDS 128
28411 #define OUTPUT_POOL_WORDS 32
28412+#endif
28413 #define SEC_XFER_SIZE 512
28414
28415 /*
28416@@ -292,10 +297,17 @@ static struct poolinfo {
28417 int poolwords;
28418 int tap1, tap2, tap3, tap4, tap5;
28419 } poolinfo_table[] = {
28420+#ifdef CONFIG_GRKERNSEC_RANDNET
28421+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28422+ { 512, 411, 308, 208, 104, 1 },
28423+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28424+ { 128, 103, 76, 51, 25, 1 },
28425+#else
28426 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28427 { 128, 103, 76, 51, 25, 1 },
28428 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28429 { 32, 26, 20, 14, 7, 1 },
28430+#endif
28431 #if 0
28432 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28433 { 2048, 1638, 1231, 819, 411, 1 },
28434@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28435 #include <linux/sysctl.h>
28436
28437 static int min_read_thresh = 8, min_write_thresh;
28438-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28439+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28440 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28441 static char sysctl_bootid[16];
28442
28443diff -urNp linux-2.6.32.46/drivers/char/rocket.c linux-2.6.32.46/drivers/char/rocket.c
28444--- linux-2.6.32.46/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28445+++ linux-2.6.32.46/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28446@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28447 struct rocket_ports tmp;
28448 int board;
28449
28450+ pax_track_stack();
28451+
28452 if (!retports)
28453 return -EFAULT;
28454 memset(&tmp, 0, sizeof (tmp));
28455diff -urNp linux-2.6.32.46/drivers/char/sonypi.c linux-2.6.32.46/drivers/char/sonypi.c
28456--- linux-2.6.32.46/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28457+++ linux-2.6.32.46/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28458@@ -55,6 +55,7 @@
28459 #include <asm/uaccess.h>
28460 #include <asm/io.h>
28461 #include <asm/system.h>
28462+#include <asm/local.h>
28463
28464 #include <linux/sonypi.h>
28465
28466@@ -491,7 +492,7 @@ static struct sonypi_device {
28467 spinlock_t fifo_lock;
28468 wait_queue_head_t fifo_proc_list;
28469 struct fasync_struct *fifo_async;
28470- int open_count;
28471+ local_t open_count;
28472 int model;
28473 struct input_dev *input_jog_dev;
28474 struct input_dev *input_key_dev;
28475@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28476 static int sonypi_misc_release(struct inode *inode, struct file *file)
28477 {
28478 mutex_lock(&sonypi_device.lock);
28479- sonypi_device.open_count--;
28480+ local_dec(&sonypi_device.open_count);
28481 mutex_unlock(&sonypi_device.lock);
28482 return 0;
28483 }
28484@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28485 lock_kernel();
28486 mutex_lock(&sonypi_device.lock);
28487 /* Flush input queue on first open */
28488- if (!sonypi_device.open_count)
28489+ if (!local_read(&sonypi_device.open_count))
28490 kfifo_reset(sonypi_device.fifo);
28491- sonypi_device.open_count++;
28492+ local_inc(&sonypi_device.open_count);
28493 mutex_unlock(&sonypi_device.lock);
28494 unlock_kernel();
28495 return 0;
28496diff -urNp linux-2.6.32.46/drivers/char/stallion.c linux-2.6.32.46/drivers/char/stallion.c
28497--- linux-2.6.32.46/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28498+++ linux-2.6.32.46/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28499@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28500 struct stlport stl_dummyport;
28501 struct stlport *portp;
28502
28503+ pax_track_stack();
28504+
28505 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28506 return -EFAULT;
28507 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28508diff -urNp linux-2.6.32.46/drivers/char/tpm/tpm_bios.c linux-2.6.32.46/drivers/char/tpm/tpm_bios.c
28509--- linux-2.6.32.46/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28510+++ linux-2.6.32.46/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28511@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28512 event = addr;
28513
28514 if ((event->event_type == 0 && event->event_size == 0) ||
28515- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28516+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28517 return NULL;
28518
28519 return addr;
28520@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28521 return NULL;
28522
28523 if ((event->event_type == 0 && event->event_size == 0) ||
28524- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28525+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28526 return NULL;
28527
28528 (*pos)++;
28529@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28530 int i;
28531
28532 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28533- seq_putc(m, data[i]);
28534+ if (!seq_putc(m, data[i]))
28535+ return -EFAULT;
28536
28537 return 0;
28538 }
28539@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28540 log->bios_event_log_end = log->bios_event_log + len;
28541
28542 virt = acpi_os_map_memory(start, len);
28543+ if (!virt) {
28544+ kfree(log->bios_event_log);
28545+ log->bios_event_log = NULL;
28546+ return -EFAULT;
28547+ }
28548
28549 memcpy(log->bios_event_log, virt, len);
28550
28551diff -urNp linux-2.6.32.46/drivers/char/tpm/tpm.c linux-2.6.32.46/drivers/char/tpm/tpm.c
28552--- linux-2.6.32.46/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28553+++ linux-2.6.32.46/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28554@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28555 chip->vendor.req_complete_val)
28556 goto out_recv;
28557
28558- if ((status == chip->vendor.req_canceled)) {
28559+ if (status == chip->vendor.req_canceled) {
28560 dev_err(chip->dev, "Operation Canceled\n");
28561 rc = -ECANCELED;
28562 goto out;
28563@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28564
28565 struct tpm_chip *chip = dev_get_drvdata(dev);
28566
28567+ pax_track_stack();
28568+
28569 tpm_cmd.header.in = tpm_readpubek_header;
28570 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28571 "attempting to read the PUBEK");
28572diff -urNp linux-2.6.32.46/drivers/char/tty_io.c linux-2.6.32.46/drivers/char/tty_io.c
28573--- linux-2.6.32.46/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28574+++ linux-2.6.32.46/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28575@@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28576 return retval;
28577 }
28578
28579+EXPORT_SYMBOL(tty_ioctl);
28580+
28581 #ifdef CONFIG_COMPAT
28582-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28583+long tty_compat_ioctl(struct file *file, unsigned int cmd,
28584 unsigned long arg)
28585 {
28586 struct inode *inode = file->f_dentry->d_inode;
28587@@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28588
28589 return retval;
28590 }
28591+
28592+EXPORT_SYMBOL(tty_compat_ioctl);
28593 #endif
28594
28595 /*
28596@@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28597
28598 void tty_default_fops(struct file_operations *fops)
28599 {
28600- *fops = tty_fops;
28601+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28602 }
28603
28604 /*
28605diff -urNp linux-2.6.32.46/drivers/char/tty_ldisc.c linux-2.6.32.46/drivers/char/tty_ldisc.c
28606--- linux-2.6.32.46/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28607+++ linux-2.6.32.46/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28608@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28609 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28610 struct tty_ldisc_ops *ldo = ld->ops;
28611
28612- ldo->refcount--;
28613+ atomic_dec(&ldo->refcount);
28614 module_put(ldo->owner);
28615 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28616
28617@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28618 spin_lock_irqsave(&tty_ldisc_lock, flags);
28619 tty_ldiscs[disc] = new_ldisc;
28620 new_ldisc->num = disc;
28621- new_ldisc->refcount = 0;
28622+ atomic_set(&new_ldisc->refcount, 0);
28623 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28624
28625 return ret;
28626@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28627 return -EINVAL;
28628
28629 spin_lock_irqsave(&tty_ldisc_lock, flags);
28630- if (tty_ldiscs[disc]->refcount)
28631+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28632 ret = -EBUSY;
28633 else
28634 tty_ldiscs[disc] = NULL;
28635@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28636 if (ldops) {
28637 ret = ERR_PTR(-EAGAIN);
28638 if (try_module_get(ldops->owner)) {
28639- ldops->refcount++;
28640+ atomic_inc(&ldops->refcount);
28641 ret = ldops;
28642 }
28643 }
28644@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28645 unsigned long flags;
28646
28647 spin_lock_irqsave(&tty_ldisc_lock, flags);
28648- ldops->refcount--;
28649+ atomic_dec(&ldops->refcount);
28650 module_put(ldops->owner);
28651 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28652 }
28653diff -urNp linux-2.6.32.46/drivers/char/virtio_console.c linux-2.6.32.46/drivers/char/virtio_console.c
28654--- linux-2.6.32.46/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28655+++ linux-2.6.32.46/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28656@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28657 * virtqueue, so we let the drivers do some boutique early-output thing. */
28658 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28659 {
28660- virtio_cons.put_chars = put_chars;
28661+ pax_open_kernel();
28662+ *(void **)&virtio_cons.put_chars = put_chars;
28663+ pax_close_kernel();
28664 return hvc_instantiate(0, 0, &virtio_cons);
28665 }
28666
28667@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28668 out_vq = vqs[1];
28669
28670 /* Start using the new console output. */
28671- virtio_cons.get_chars = get_chars;
28672- virtio_cons.put_chars = put_chars;
28673- virtio_cons.notifier_add = notifier_add_vio;
28674- virtio_cons.notifier_del = notifier_del_vio;
28675- virtio_cons.notifier_hangup = notifier_del_vio;
28676+ pax_open_kernel();
28677+ *(void **)&virtio_cons.get_chars = get_chars;
28678+ *(void **)&virtio_cons.put_chars = put_chars;
28679+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28680+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28681+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28682+ pax_close_kernel();
28683
28684 /* The first argument of hvc_alloc() is the virtual console number, so
28685 * we use zero. The second argument is the parameter for the
28686diff -urNp linux-2.6.32.46/drivers/char/vt.c linux-2.6.32.46/drivers/char/vt.c
28687--- linux-2.6.32.46/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28688+++ linux-2.6.32.46/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28689@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28690
28691 static void notify_write(struct vc_data *vc, unsigned int unicode)
28692 {
28693- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28694+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28695 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28696 }
28697
28698diff -urNp linux-2.6.32.46/drivers/char/vt_ioctl.c linux-2.6.32.46/drivers/char/vt_ioctl.c
28699--- linux-2.6.32.46/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28700+++ linux-2.6.32.46/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28701@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28702 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28703 return -EFAULT;
28704
28705- if (!capable(CAP_SYS_TTY_CONFIG))
28706- perm = 0;
28707-
28708 switch (cmd) {
28709 case KDGKBENT:
28710 key_map = key_maps[s];
28711@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28712 val = (i ? K_HOLE : K_NOSUCHMAP);
28713 return put_user(val, &user_kbe->kb_value);
28714 case KDSKBENT:
28715+ if (!capable(CAP_SYS_TTY_CONFIG))
28716+ perm = 0;
28717+
28718 if (!perm)
28719 return -EPERM;
28720+
28721 if (!i && v == K_NOSUCHMAP) {
28722 /* deallocate map */
28723 key_map = key_maps[s];
28724@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28725 int i, j, k;
28726 int ret;
28727
28728- if (!capable(CAP_SYS_TTY_CONFIG))
28729- perm = 0;
28730-
28731 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28732 if (!kbs) {
28733 ret = -ENOMEM;
28734@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28735 kfree(kbs);
28736 return ((p && *p) ? -EOVERFLOW : 0);
28737 case KDSKBSENT:
28738+ if (!capable(CAP_SYS_TTY_CONFIG))
28739+ perm = 0;
28740+
28741 if (!perm) {
28742 ret = -EPERM;
28743 goto reterr;
28744diff -urNp linux-2.6.32.46/drivers/cpufreq/cpufreq.c linux-2.6.32.46/drivers/cpufreq/cpufreq.c
28745--- linux-2.6.32.46/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28746+++ linux-2.6.32.46/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28747@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28748 complete(&policy->kobj_unregister);
28749 }
28750
28751-static struct sysfs_ops sysfs_ops = {
28752+static const struct sysfs_ops sysfs_ops = {
28753 .show = show,
28754 .store = store,
28755 };
28756diff -urNp linux-2.6.32.46/drivers/cpuidle/sysfs.c linux-2.6.32.46/drivers/cpuidle/sysfs.c
28757--- linux-2.6.32.46/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28758+++ linux-2.6.32.46/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28759@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28760 return ret;
28761 }
28762
28763-static struct sysfs_ops cpuidle_sysfs_ops = {
28764+static const struct sysfs_ops cpuidle_sysfs_ops = {
28765 .show = cpuidle_show,
28766 .store = cpuidle_store,
28767 };
28768@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28769 return ret;
28770 }
28771
28772-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28773+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28774 .show = cpuidle_state_show,
28775 };
28776
28777@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28778 .release = cpuidle_state_sysfs_release,
28779 };
28780
28781-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28782+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28783 {
28784 kobject_put(&device->kobjs[i]->kobj);
28785 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28786diff -urNp linux-2.6.32.46/drivers/crypto/hifn_795x.c linux-2.6.32.46/drivers/crypto/hifn_795x.c
28787--- linux-2.6.32.46/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28788+++ linux-2.6.32.46/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28789@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28790 0xCA, 0x34, 0x2B, 0x2E};
28791 struct scatterlist sg;
28792
28793+ pax_track_stack();
28794+
28795 memset(src, 0, sizeof(src));
28796 memset(ctx.key, 0, sizeof(ctx.key));
28797
28798diff -urNp linux-2.6.32.46/drivers/crypto/padlock-aes.c linux-2.6.32.46/drivers/crypto/padlock-aes.c
28799--- linux-2.6.32.46/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28800+++ linux-2.6.32.46/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28801@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28802 struct crypto_aes_ctx gen_aes;
28803 int cpu;
28804
28805+ pax_track_stack();
28806+
28807 if (key_len % 8) {
28808 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28809 return -EINVAL;
28810diff -urNp linux-2.6.32.46/drivers/dma/ioat/dma.c linux-2.6.32.46/drivers/dma/ioat/dma.c
28811--- linux-2.6.32.46/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28812+++ linux-2.6.32.46/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28813@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28814 return entry->show(&chan->common, page);
28815 }
28816
28817-struct sysfs_ops ioat_sysfs_ops = {
28818+const struct sysfs_ops ioat_sysfs_ops = {
28819 .show = ioat_attr_show,
28820 };
28821
28822diff -urNp linux-2.6.32.46/drivers/dma/ioat/dma.h linux-2.6.32.46/drivers/dma/ioat/dma.h
28823--- linux-2.6.32.46/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28824+++ linux-2.6.32.46/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28825@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28826 unsigned long *phys_complete);
28827 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28828 void ioat_kobject_del(struct ioatdma_device *device);
28829-extern struct sysfs_ops ioat_sysfs_ops;
28830+extern const struct sysfs_ops ioat_sysfs_ops;
28831 extern struct ioat_sysfs_entry ioat_version_attr;
28832 extern struct ioat_sysfs_entry ioat_cap_attr;
28833 #endif /* IOATDMA_H */
28834diff -urNp linux-2.6.32.46/drivers/edac/edac_device_sysfs.c linux-2.6.32.46/drivers/edac/edac_device_sysfs.c
28835--- linux-2.6.32.46/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28836+++ linux-2.6.32.46/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28837@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28838 }
28839
28840 /* edac_dev file operations for an 'ctl_info' */
28841-static struct sysfs_ops device_ctl_info_ops = {
28842+static const struct sysfs_ops device_ctl_info_ops = {
28843 .show = edac_dev_ctl_info_show,
28844 .store = edac_dev_ctl_info_store
28845 };
28846@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28847 }
28848
28849 /* edac_dev file operations for an 'instance' */
28850-static struct sysfs_ops device_instance_ops = {
28851+static const struct sysfs_ops device_instance_ops = {
28852 .show = edac_dev_instance_show,
28853 .store = edac_dev_instance_store
28854 };
28855@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28856 }
28857
28858 /* edac_dev file operations for a 'block' */
28859-static struct sysfs_ops device_block_ops = {
28860+static const struct sysfs_ops device_block_ops = {
28861 .show = edac_dev_block_show,
28862 .store = edac_dev_block_store
28863 };
28864diff -urNp linux-2.6.32.46/drivers/edac/edac_mc_sysfs.c linux-2.6.32.46/drivers/edac/edac_mc_sysfs.c
28865--- linux-2.6.32.46/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28866+++ linux-2.6.32.46/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28867@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28868 return -EIO;
28869 }
28870
28871-static struct sysfs_ops csrowfs_ops = {
28872+static const struct sysfs_ops csrowfs_ops = {
28873 .show = csrowdev_show,
28874 .store = csrowdev_store
28875 };
28876@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28877 }
28878
28879 /* Intermediate show/store table */
28880-static struct sysfs_ops mci_ops = {
28881+static const struct sysfs_ops mci_ops = {
28882 .show = mcidev_show,
28883 .store = mcidev_store
28884 };
28885diff -urNp linux-2.6.32.46/drivers/edac/edac_pci_sysfs.c linux-2.6.32.46/drivers/edac/edac_pci_sysfs.c
28886--- linux-2.6.32.46/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28887+++ linux-2.6.32.46/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28888@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28889 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28890 static int edac_pci_poll_msec = 1000; /* one second workq period */
28891
28892-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28893-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28894+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28895+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28896
28897 static struct kobject *edac_pci_top_main_kobj;
28898 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28899@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28900 }
28901
28902 /* fs_ops table */
28903-static struct sysfs_ops pci_instance_ops = {
28904+static const struct sysfs_ops pci_instance_ops = {
28905 .show = edac_pci_instance_show,
28906 .store = edac_pci_instance_store
28907 };
28908@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28909 return -EIO;
28910 }
28911
28912-static struct sysfs_ops edac_pci_sysfs_ops = {
28913+static const struct sysfs_ops edac_pci_sysfs_ops = {
28914 .show = edac_pci_dev_show,
28915 .store = edac_pci_dev_store
28916 };
28917@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28918 edac_printk(KERN_CRIT, EDAC_PCI,
28919 "Signaled System Error on %s\n",
28920 pci_name(dev));
28921- atomic_inc(&pci_nonparity_count);
28922+ atomic_inc_unchecked(&pci_nonparity_count);
28923 }
28924
28925 if (status & (PCI_STATUS_PARITY)) {
28926@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28927 "Master Data Parity Error on %s\n",
28928 pci_name(dev));
28929
28930- atomic_inc(&pci_parity_count);
28931+ atomic_inc_unchecked(&pci_parity_count);
28932 }
28933
28934 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28935@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28936 "Detected Parity Error on %s\n",
28937 pci_name(dev));
28938
28939- atomic_inc(&pci_parity_count);
28940+ atomic_inc_unchecked(&pci_parity_count);
28941 }
28942 }
28943
28944@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28945 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28946 "Signaled System Error on %s\n",
28947 pci_name(dev));
28948- atomic_inc(&pci_nonparity_count);
28949+ atomic_inc_unchecked(&pci_nonparity_count);
28950 }
28951
28952 if (status & (PCI_STATUS_PARITY)) {
28953@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28954 "Master Data Parity Error on "
28955 "%s\n", pci_name(dev));
28956
28957- atomic_inc(&pci_parity_count);
28958+ atomic_inc_unchecked(&pci_parity_count);
28959 }
28960
28961 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28962@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28963 "Detected Parity Error on %s\n",
28964 pci_name(dev));
28965
28966- atomic_inc(&pci_parity_count);
28967+ atomic_inc_unchecked(&pci_parity_count);
28968 }
28969 }
28970 }
28971@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28972 if (!check_pci_errors)
28973 return;
28974
28975- before_count = atomic_read(&pci_parity_count);
28976+ before_count = atomic_read_unchecked(&pci_parity_count);
28977
28978 /* scan all PCI devices looking for a Parity Error on devices and
28979 * bridges.
28980@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28981 /* Only if operator has selected panic on PCI Error */
28982 if (edac_pci_get_panic_on_pe()) {
28983 /* If the count is different 'after' from 'before' */
28984- if (before_count != atomic_read(&pci_parity_count))
28985+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28986 panic("EDAC: PCI Parity Error");
28987 }
28988 }
28989diff -urNp linux-2.6.32.46/drivers/firewire/core-card.c linux-2.6.32.46/drivers/firewire/core-card.c
28990--- linux-2.6.32.46/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28991+++ linux-2.6.32.46/drivers/firewire/core-card.c 2011-08-23 21:22:32.000000000 -0400
28992@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
28993
28994 void fw_core_remove_card(struct fw_card *card)
28995 {
28996- struct fw_card_driver dummy_driver = dummy_driver_template;
28997+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
28998
28999 card->driver->update_phy_reg(card, 4,
29000 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29001diff -urNp linux-2.6.32.46/drivers/firewire/core-cdev.c linux-2.6.32.46/drivers/firewire/core-cdev.c
29002--- linux-2.6.32.46/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
29003+++ linux-2.6.32.46/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
29004@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29005 int ret;
29006
29007 if ((request->channels == 0 && request->bandwidth == 0) ||
29008- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29009- request->bandwidth < 0)
29010+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29011 return -EINVAL;
29012
29013 r = kmalloc(sizeof(*r), GFP_KERNEL);
29014diff -urNp linux-2.6.32.46/drivers/firewire/core.h linux-2.6.32.46/drivers/firewire/core.h
29015--- linux-2.6.32.46/drivers/firewire/core.h 2011-03-27 14:31:47.000000000 -0400
29016+++ linux-2.6.32.46/drivers/firewire/core.h 2011-08-23 20:24:26.000000000 -0400
29017@@ -86,6 +86,7 @@ struct fw_card_driver {
29018
29019 int (*stop_iso)(struct fw_iso_context *ctx);
29020 };
29021+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29022
29023 void fw_card_initialize(struct fw_card *card,
29024 const struct fw_card_driver *driver, struct device *device);
29025diff -urNp linux-2.6.32.46/drivers/firewire/core-transaction.c linux-2.6.32.46/drivers/firewire/core-transaction.c
29026--- linux-2.6.32.46/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29027+++ linux-2.6.32.46/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29028@@ -36,6 +36,7 @@
29029 #include <linux/string.h>
29030 #include <linux/timer.h>
29031 #include <linux/types.h>
29032+#include <linux/sched.h>
29033
29034 #include <asm/byteorder.h>
29035
29036@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29037 struct transaction_callback_data d;
29038 struct fw_transaction t;
29039
29040+ pax_track_stack();
29041+
29042 init_completion(&d.done);
29043 d.payload = payload;
29044 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29045diff -urNp linux-2.6.32.46/drivers/firmware/dmi_scan.c linux-2.6.32.46/drivers/firmware/dmi_scan.c
29046--- linux-2.6.32.46/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29047+++ linux-2.6.32.46/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29048@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29049 }
29050 }
29051 else {
29052- /*
29053- * no iounmap() for that ioremap(); it would be a no-op, but
29054- * it's so early in setup that sucker gets confused into doing
29055- * what it shouldn't if we actually call it.
29056- */
29057 p = dmi_ioremap(0xF0000, 0x10000);
29058 if (p == NULL)
29059 goto error;
29060diff -urNp linux-2.6.32.46/drivers/firmware/edd.c linux-2.6.32.46/drivers/firmware/edd.c
29061--- linux-2.6.32.46/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29062+++ linux-2.6.32.46/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29063@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29064 return ret;
29065 }
29066
29067-static struct sysfs_ops edd_attr_ops = {
29068+static const struct sysfs_ops edd_attr_ops = {
29069 .show = edd_attr_show,
29070 };
29071
29072diff -urNp linux-2.6.32.46/drivers/firmware/efivars.c linux-2.6.32.46/drivers/firmware/efivars.c
29073--- linux-2.6.32.46/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29074+++ linux-2.6.32.46/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29075@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29076 return ret;
29077 }
29078
29079-static struct sysfs_ops efivar_attr_ops = {
29080+static const struct sysfs_ops efivar_attr_ops = {
29081 .show = efivar_attr_show,
29082 .store = efivar_attr_store,
29083 };
29084diff -urNp linux-2.6.32.46/drivers/firmware/iscsi_ibft.c linux-2.6.32.46/drivers/firmware/iscsi_ibft.c
29085--- linux-2.6.32.46/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29086+++ linux-2.6.32.46/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29087@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29088 return ret;
29089 }
29090
29091-static struct sysfs_ops ibft_attr_ops = {
29092+static const struct sysfs_ops ibft_attr_ops = {
29093 .show = ibft_show_attribute,
29094 };
29095
29096diff -urNp linux-2.6.32.46/drivers/firmware/memmap.c linux-2.6.32.46/drivers/firmware/memmap.c
29097--- linux-2.6.32.46/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29098+++ linux-2.6.32.46/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29099@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29100 NULL
29101 };
29102
29103-static struct sysfs_ops memmap_attr_ops = {
29104+static const struct sysfs_ops memmap_attr_ops = {
29105 .show = memmap_attr_show,
29106 };
29107
29108diff -urNp linux-2.6.32.46/drivers/gpio/vr41xx_giu.c linux-2.6.32.46/drivers/gpio/vr41xx_giu.c
29109--- linux-2.6.32.46/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29110+++ linux-2.6.32.46/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29111@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29112 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29113 maskl, pendl, maskh, pendh);
29114
29115- atomic_inc(&irq_err_count);
29116+ atomic_inc_unchecked(&irq_err_count);
29117
29118 return -EINVAL;
29119 }
29120diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.46/drivers/gpu/drm/drm_crtc_helper.c
29121--- linux-2.6.32.46/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29122+++ linux-2.6.32.46/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29123@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29124 struct drm_crtc *tmp;
29125 int crtc_mask = 1;
29126
29127- WARN(!crtc, "checking null crtc?");
29128+ BUG_ON(!crtc);
29129
29130 dev = crtc->dev;
29131
29132@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29133
29134 adjusted_mode = drm_mode_duplicate(dev, mode);
29135
29136+ pax_track_stack();
29137+
29138 crtc->enabled = drm_helper_crtc_in_use(crtc);
29139
29140 if (!crtc->enabled)
29141diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_drv.c linux-2.6.32.46/drivers/gpu/drm/drm_drv.c
29142--- linux-2.6.32.46/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29143+++ linux-2.6.32.46/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29144@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29145 char *kdata = NULL;
29146
29147 atomic_inc(&dev->ioctl_count);
29148- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29149+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29150 ++file_priv->ioctl_count;
29151
29152 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29153diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_fops.c linux-2.6.32.46/drivers/gpu/drm/drm_fops.c
29154--- linux-2.6.32.46/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29155+++ linux-2.6.32.46/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29156@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29157 }
29158
29159 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29160- atomic_set(&dev->counts[i], 0);
29161+ atomic_set_unchecked(&dev->counts[i], 0);
29162
29163 dev->sigdata.lock = NULL;
29164
29165@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29166
29167 retcode = drm_open_helper(inode, filp, dev);
29168 if (!retcode) {
29169- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29170+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29171 spin_lock(&dev->count_lock);
29172- if (!dev->open_count++) {
29173+ if (local_inc_return(&dev->open_count) == 1) {
29174 spin_unlock(&dev->count_lock);
29175 retcode = drm_setup(dev);
29176 goto out;
29177@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29178
29179 lock_kernel();
29180
29181- DRM_DEBUG("open_count = %d\n", dev->open_count);
29182+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29183
29184 if (dev->driver->preclose)
29185 dev->driver->preclose(dev, file_priv);
29186@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29187 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29188 task_pid_nr(current),
29189 (long)old_encode_dev(file_priv->minor->device),
29190- dev->open_count);
29191+ local_read(&dev->open_count));
29192
29193 /* if the master has gone away we can't do anything with the lock */
29194 if (file_priv->minor->master)
29195@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29196 * End inline drm_release
29197 */
29198
29199- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29200+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29201 spin_lock(&dev->count_lock);
29202- if (!--dev->open_count) {
29203+ if (local_dec_and_test(&dev->open_count)) {
29204 if (atomic_read(&dev->ioctl_count)) {
29205 DRM_ERROR("Device busy: %d\n",
29206 atomic_read(&dev->ioctl_count));
29207diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_gem.c linux-2.6.32.46/drivers/gpu/drm/drm_gem.c
29208--- linux-2.6.32.46/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29209+++ linux-2.6.32.46/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29210@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29211 spin_lock_init(&dev->object_name_lock);
29212 idr_init(&dev->object_name_idr);
29213 atomic_set(&dev->object_count, 0);
29214- atomic_set(&dev->object_memory, 0);
29215+ atomic_set_unchecked(&dev->object_memory, 0);
29216 atomic_set(&dev->pin_count, 0);
29217- atomic_set(&dev->pin_memory, 0);
29218+ atomic_set_unchecked(&dev->pin_memory, 0);
29219 atomic_set(&dev->gtt_count, 0);
29220- atomic_set(&dev->gtt_memory, 0);
29221+ atomic_set_unchecked(&dev->gtt_memory, 0);
29222
29223 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29224 if (!mm) {
29225@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29226 goto fput;
29227 }
29228 atomic_inc(&dev->object_count);
29229- atomic_add(obj->size, &dev->object_memory);
29230+ atomic_add_unchecked(obj->size, &dev->object_memory);
29231 return obj;
29232 fput:
29233 fput(obj->filp);
29234@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29235
29236 fput(obj->filp);
29237 atomic_dec(&dev->object_count);
29238- atomic_sub(obj->size, &dev->object_memory);
29239+ atomic_sub_unchecked(obj->size, &dev->object_memory);
29240 kfree(obj);
29241 }
29242 EXPORT_SYMBOL(drm_gem_object_free);
29243diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_info.c linux-2.6.32.46/drivers/gpu/drm/drm_info.c
29244--- linux-2.6.32.46/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29245+++ linux-2.6.32.46/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29246@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29247 struct drm_local_map *map;
29248 struct drm_map_list *r_list;
29249
29250- /* Hardcoded from _DRM_FRAME_BUFFER,
29251- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29252- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29253- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29254+ static const char * const types[] = {
29255+ [_DRM_FRAME_BUFFER] = "FB",
29256+ [_DRM_REGISTERS] = "REG",
29257+ [_DRM_SHM] = "SHM",
29258+ [_DRM_AGP] = "AGP",
29259+ [_DRM_SCATTER_GATHER] = "SG",
29260+ [_DRM_CONSISTENT] = "PCI",
29261+ [_DRM_GEM] = "GEM" };
29262 const char *type;
29263 int i;
29264
29265@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29266 map = r_list->map;
29267 if (!map)
29268 continue;
29269- if (map->type < 0 || map->type > 5)
29270+ if (map->type >= ARRAY_SIZE(types))
29271 type = "??";
29272 else
29273 type = types[map->type];
29274@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29275 struct drm_device *dev = node->minor->dev;
29276
29277 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29278- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29279+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29280 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29281- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29282- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29283+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29284+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29285 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29286 return 0;
29287 }
29288@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29289 mutex_lock(&dev->struct_mutex);
29290 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29291 atomic_read(&dev->vma_count),
29292+#ifdef CONFIG_GRKERNSEC_HIDESYM
29293+ NULL, 0);
29294+#else
29295 high_memory, (u64)virt_to_phys(high_memory));
29296+#endif
29297
29298 list_for_each_entry(pt, &dev->vmalist, head) {
29299 vma = pt->vma;
29300@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29301 continue;
29302 seq_printf(m,
29303 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29304- pt->pid, vma->vm_start, vma->vm_end,
29305+ pt->pid,
29306+#ifdef CONFIG_GRKERNSEC_HIDESYM
29307+ 0, 0,
29308+#else
29309+ vma->vm_start, vma->vm_end,
29310+#endif
29311 vma->vm_flags & VM_READ ? 'r' : '-',
29312 vma->vm_flags & VM_WRITE ? 'w' : '-',
29313 vma->vm_flags & VM_EXEC ? 'x' : '-',
29314 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29315 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29316 vma->vm_flags & VM_IO ? 'i' : '-',
29317+#ifdef CONFIG_GRKERNSEC_HIDESYM
29318+ 0);
29319+#else
29320 vma->vm_pgoff);
29321+#endif
29322
29323 #if defined(__i386__)
29324 pgprot = pgprot_val(vma->vm_page_prot);
29325diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.46/drivers/gpu/drm/drm_ioctl.c
29326--- linux-2.6.32.46/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29327+++ linux-2.6.32.46/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29328@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29329 stats->data[i].value =
29330 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29331 else
29332- stats->data[i].value = atomic_read(&dev->counts[i]);
29333+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29334 stats->data[i].type = dev->types[i];
29335 }
29336
29337diff -urNp linux-2.6.32.46/drivers/gpu/drm/drm_lock.c linux-2.6.32.46/drivers/gpu/drm/drm_lock.c
29338--- linux-2.6.32.46/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29339+++ linux-2.6.32.46/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29340@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29341 if (drm_lock_take(&master->lock, lock->context)) {
29342 master->lock.file_priv = file_priv;
29343 master->lock.lock_time = jiffies;
29344- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29345+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29346 break; /* Got lock */
29347 }
29348
29349@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29350 return -EINVAL;
29351 }
29352
29353- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29354+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29355
29356 /* kernel_context_switch isn't used by any of the x86 drm
29357 * modules but is required by the Sparc driver.
29358diff -urNp linux-2.6.32.46/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.46/drivers/gpu/drm/i810/i810_dma.c
29359--- linux-2.6.32.46/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29360+++ linux-2.6.32.46/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29361@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29362 dma->buflist[vertex->idx],
29363 vertex->discard, vertex->used);
29364
29365- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29366- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29367+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29368+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29369 sarea_priv->last_enqueue = dev_priv->counter - 1;
29370 sarea_priv->last_dispatch = (int)hw_status[5];
29371
29372@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29373 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29374 mc->last_render);
29375
29376- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29377- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29378+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29379+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29380 sarea_priv->last_enqueue = dev_priv->counter - 1;
29381 sarea_priv->last_dispatch = (int)hw_status[5];
29382
29383diff -urNp linux-2.6.32.46/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.46/drivers/gpu/drm/i810/i810_drv.h
29384--- linux-2.6.32.46/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29385+++ linux-2.6.32.46/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29386@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29387 int page_flipping;
29388
29389 wait_queue_head_t irq_queue;
29390- atomic_t irq_received;
29391- atomic_t irq_emitted;
29392+ atomic_unchecked_t irq_received;
29393+ atomic_unchecked_t irq_emitted;
29394
29395 int front_offset;
29396 } drm_i810_private_t;
29397diff -urNp linux-2.6.32.46/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.46/drivers/gpu/drm/i830/i830_drv.h
29398--- linux-2.6.32.46/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29399+++ linux-2.6.32.46/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29400@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29401 int page_flipping;
29402
29403 wait_queue_head_t irq_queue;
29404- atomic_t irq_received;
29405- atomic_t irq_emitted;
29406+ atomic_unchecked_t irq_received;
29407+ atomic_unchecked_t irq_emitted;
29408
29409 int use_mi_batchbuffer_start;
29410
29411diff -urNp linux-2.6.32.46/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.46/drivers/gpu/drm/i830/i830_irq.c
29412--- linux-2.6.32.46/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29413+++ linux-2.6.32.46/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29414@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29415
29416 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29417
29418- atomic_inc(&dev_priv->irq_received);
29419+ atomic_inc_unchecked(&dev_priv->irq_received);
29420 wake_up_interruptible(&dev_priv->irq_queue);
29421
29422 return IRQ_HANDLED;
29423@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29424
29425 DRM_DEBUG("%s\n", __func__);
29426
29427- atomic_inc(&dev_priv->irq_emitted);
29428+ atomic_inc_unchecked(&dev_priv->irq_emitted);
29429
29430 BEGIN_LP_RING(2);
29431 OUT_RING(0);
29432 OUT_RING(GFX_OP_USER_INTERRUPT);
29433 ADVANCE_LP_RING();
29434
29435- return atomic_read(&dev_priv->irq_emitted);
29436+ return atomic_read_unchecked(&dev_priv->irq_emitted);
29437 }
29438
29439 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29440@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29441
29442 DRM_DEBUG("%s\n", __func__);
29443
29444- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29445+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29446 return 0;
29447
29448 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29449@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29450
29451 for (;;) {
29452 __set_current_state(TASK_INTERRUPTIBLE);
29453- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29454+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29455 break;
29456 if ((signed)(end - jiffies) <= 0) {
29457 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29458@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29459 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29460 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29461 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29462- atomic_set(&dev_priv->irq_received, 0);
29463- atomic_set(&dev_priv->irq_emitted, 0);
29464+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29465+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29466 init_waitqueue_head(&dev_priv->irq_queue);
29467 }
29468
29469diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7017.c
29470--- linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29471+++ linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29472@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29473 }
29474 }
29475
29476-struct intel_dvo_dev_ops ch7017_ops = {
29477+const struct intel_dvo_dev_ops ch7017_ops = {
29478 .init = ch7017_init,
29479 .detect = ch7017_detect,
29480 .mode_valid = ch7017_mode_valid,
29481diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7xxx.c
29482--- linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29483+++ linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29484@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29485 }
29486 }
29487
29488-struct intel_dvo_dev_ops ch7xxx_ops = {
29489+const struct intel_dvo_dev_ops ch7xxx_ops = {
29490 .init = ch7xxx_init,
29491 .detect = ch7xxx_detect,
29492 .mode_valid = ch7xxx_mode_valid,
29493diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/dvo.h linux-2.6.32.46/drivers/gpu/drm/i915/dvo.h
29494--- linux-2.6.32.46/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29495+++ linux-2.6.32.46/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29496@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29497 *
29498 * \return singly-linked list of modes or NULL if no modes found.
29499 */
29500- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29501+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29502
29503 /**
29504 * Clean up driver-specific bits of the output
29505 */
29506- void (*destroy) (struct intel_dvo_device *dvo);
29507+ void (* const destroy) (struct intel_dvo_device *dvo);
29508
29509 /**
29510 * Debugging hook to dump device registers to log file
29511 */
29512- void (*dump_regs)(struct intel_dvo_device *dvo);
29513+ void (* const dump_regs)(struct intel_dvo_device *dvo);
29514 };
29515
29516-extern struct intel_dvo_dev_ops sil164_ops;
29517-extern struct intel_dvo_dev_ops ch7xxx_ops;
29518-extern struct intel_dvo_dev_ops ivch_ops;
29519-extern struct intel_dvo_dev_ops tfp410_ops;
29520-extern struct intel_dvo_dev_ops ch7017_ops;
29521+extern const struct intel_dvo_dev_ops sil164_ops;
29522+extern const struct intel_dvo_dev_ops ch7xxx_ops;
29523+extern const struct intel_dvo_dev_ops ivch_ops;
29524+extern const struct intel_dvo_dev_ops tfp410_ops;
29525+extern const struct intel_dvo_dev_ops ch7017_ops;
29526
29527 #endif /* _INTEL_DVO_H */
29528diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ivch.c
29529--- linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29530+++ linux-2.6.32.46/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29531@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29532 }
29533 }
29534
29535-struct intel_dvo_dev_ops ivch_ops= {
29536+const struct intel_dvo_dev_ops ivch_ops= {
29537 .init = ivch_init,
29538 .dpms = ivch_dpms,
29539 .save = ivch_save,
29540diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.46/drivers/gpu/drm/i915/dvo_sil164.c
29541--- linux-2.6.32.46/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29542+++ linux-2.6.32.46/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29543@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29544 }
29545 }
29546
29547-struct intel_dvo_dev_ops sil164_ops = {
29548+const struct intel_dvo_dev_ops sil164_ops = {
29549 .init = sil164_init,
29550 .detect = sil164_detect,
29551 .mode_valid = sil164_mode_valid,
29552diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.46/drivers/gpu/drm/i915/dvo_tfp410.c
29553--- linux-2.6.32.46/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29554+++ linux-2.6.32.46/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29555@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29556 }
29557 }
29558
29559-struct intel_dvo_dev_ops tfp410_ops = {
29560+const struct intel_dvo_dev_ops tfp410_ops = {
29561 .init = tfp410_init,
29562 .detect = tfp410_detect,
29563 .mode_valid = tfp410_mode_valid,
29564diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.46/drivers/gpu/drm/i915/i915_debugfs.c
29565--- linux-2.6.32.46/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29566+++ linux-2.6.32.46/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29567@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29568 I915_READ(GTIMR));
29569 }
29570 seq_printf(m, "Interrupts received: %d\n",
29571- atomic_read(&dev_priv->irq_received));
29572+ atomic_read_unchecked(&dev_priv->irq_received));
29573 if (dev_priv->hw_status_page != NULL) {
29574 seq_printf(m, "Current sequence: %d\n",
29575 i915_get_gem_seqno(dev));
29576diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.c
29577--- linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29578+++ linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29579@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29580 return i915_resume(dev);
29581 }
29582
29583-static struct vm_operations_struct i915_gem_vm_ops = {
29584+static const struct vm_operations_struct i915_gem_vm_ops = {
29585 .fault = i915_gem_fault,
29586 .open = drm_gem_vm_open,
29587 .close = drm_gem_vm_close,
29588diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.h
29589--- linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29590+++ linux-2.6.32.46/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29591@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29592 /* display clock increase/decrease */
29593 /* pll clock increase/decrease */
29594 /* clock gating init */
29595-};
29596+} __no_const;
29597
29598 typedef struct drm_i915_private {
29599 struct drm_device *dev;
29600@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29601 int page_flipping;
29602
29603 wait_queue_head_t irq_queue;
29604- atomic_t irq_received;
29605+ atomic_unchecked_t irq_received;
29606 /** Protects user_irq_refcount and irq_mask_reg */
29607 spinlock_t user_irq_lock;
29608 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29609diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.46/drivers/gpu/drm/i915/i915_gem.c
29610--- linux-2.6.32.46/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29611+++ linux-2.6.32.46/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29612@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29613
29614 args->aper_size = dev->gtt_total;
29615 args->aper_available_size = (args->aper_size -
29616- atomic_read(&dev->pin_memory));
29617+ atomic_read_unchecked(&dev->pin_memory));
29618
29619 return 0;
29620 }
29621@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29622 return -EINVAL;
29623 }
29624
29625+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29626+ drm_gem_object_unreference(obj);
29627+ return -EFAULT;
29628+ }
29629+
29630 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29631 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29632 } else {
29633@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29634 return -EINVAL;
29635 }
29636
29637+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29638+ drm_gem_object_unreference(obj);
29639+ return -EFAULT;
29640+ }
29641+
29642 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29643 * it would end up going through the fenced access, and we'll get
29644 * different detiling behavior between reading and writing.
29645@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29646
29647 if (obj_priv->gtt_space) {
29648 atomic_dec(&dev->gtt_count);
29649- atomic_sub(obj->size, &dev->gtt_memory);
29650+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29651
29652 drm_mm_put_block(obj_priv->gtt_space);
29653 obj_priv->gtt_space = NULL;
29654@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29655 goto search_free;
29656 }
29657 atomic_inc(&dev->gtt_count);
29658- atomic_add(obj->size, &dev->gtt_memory);
29659+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
29660
29661 /* Assert that the object is not currently in any GPU domain. As it
29662 * wasn't in the GTT, there shouldn't be any way it could have been in
29663@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29664 "%d/%d gtt bytes\n",
29665 atomic_read(&dev->object_count),
29666 atomic_read(&dev->pin_count),
29667- atomic_read(&dev->object_memory),
29668- atomic_read(&dev->pin_memory),
29669- atomic_read(&dev->gtt_memory),
29670+ atomic_read_unchecked(&dev->object_memory),
29671+ atomic_read_unchecked(&dev->pin_memory),
29672+ atomic_read_unchecked(&dev->gtt_memory),
29673 dev->gtt_total);
29674 }
29675 goto err;
29676@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29677 */
29678 if (obj_priv->pin_count == 1) {
29679 atomic_inc(&dev->pin_count);
29680- atomic_add(obj->size, &dev->pin_memory);
29681+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29682 if (!obj_priv->active &&
29683 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29684 !list_empty(&obj_priv->list))
29685@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29686 list_move_tail(&obj_priv->list,
29687 &dev_priv->mm.inactive_list);
29688 atomic_dec(&dev->pin_count);
29689- atomic_sub(obj->size, &dev->pin_memory);
29690+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29691 }
29692 i915_verify_inactive(dev, __FILE__, __LINE__);
29693 }
29694diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.46/drivers/gpu/drm/i915/i915_irq.c
29695--- linux-2.6.32.46/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29696+++ linux-2.6.32.46/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29697@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29698 int irq_received;
29699 int ret = IRQ_NONE;
29700
29701- atomic_inc(&dev_priv->irq_received);
29702+ atomic_inc_unchecked(&dev_priv->irq_received);
29703
29704 if (IS_IGDNG(dev))
29705 return igdng_irq_handler(dev);
29706@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29707 {
29708 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29709
29710- atomic_set(&dev_priv->irq_received, 0);
29711+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29712
29713 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29714 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29715diff -urNp linux-2.6.32.46/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.46/drivers/gpu/drm/i915/intel_sdvo.c
29716--- linux-2.6.32.46/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29717+++ linux-2.6.32.46/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29718@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29719 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29720
29721 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29722- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29723+ pax_open_kernel();
29724+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29725+ pax_close_kernel();
29726
29727 /* Read the regs to test if we can talk to the device */
29728 for (i = 0; i < 0x40; i++) {
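The intel_sdvo hunk wraps the assignment to intel_sdvo_i2c_bit_algo.functionality in pax_open_kernel()/pax_close_kernel(): under PaX constification/KERNEXEC such ops structures are kept in read-only memory, so a one-off runtime write has to lift that protection temporarily. Below is a rough userspace analogy using mprotect(); struct ops, old_impl and new_impl are hypothetical, and the kernel mechanism is not literally mprotect().

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical ops table that normally lives in read-only memory. */
struct ops { int (*func)(void); };

static int old_impl(void) { return 1; }
static int new_impl(void) { return 2; }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        struct ops *tbl = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tbl == MAP_FAILED)
                return 1;

        tbl->func = old_impl;
        mprotect(tbl, pagesz, PROT_READ);               /* "constified" steady state */

        mprotect(tbl, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() analogue */
        tbl->func = new_impl;                           /* the one permitted write */
        mprotect(tbl, pagesz, PROT_READ);               /* pax_close_kernel() analogue */

        printf("%d\n", tbl->func());                    /* prints 2 */
        munmap(tbl, pagesz);
        return 0;
}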
29729diff -urNp linux-2.6.32.46/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.46/drivers/gpu/drm/mga/mga_drv.h
29730--- linux-2.6.32.46/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29731+++ linux-2.6.32.46/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29732@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29733 u32 clear_cmd;
29734 u32 maccess;
29735
29736- atomic_t vbl_received; /**< Number of vblanks received. */
29737+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29738 wait_queue_head_t fence_queue;
29739- atomic_t last_fence_retired;
29740+ atomic_unchecked_t last_fence_retired;
29741 u32 next_fence_to_post;
29742
29743 unsigned int fb_cpp;
29744diff -urNp linux-2.6.32.46/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.46/drivers/gpu/drm/mga/mga_irq.c
29745--- linux-2.6.32.46/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29746+++ linux-2.6.32.46/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29747@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29748 if (crtc != 0)
29749 return 0;
29750
29751- return atomic_read(&dev_priv->vbl_received);
29752+ return atomic_read_unchecked(&dev_priv->vbl_received);
29753 }
29754
29755
29756@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29757 /* VBLANK interrupt */
29758 if (status & MGA_VLINEPEN) {
29759 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29760- atomic_inc(&dev_priv->vbl_received);
29761+ atomic_inc_unchecked(&dev_priv->vbl_received);
29762 drm_handle_vblank(dev, 0);
29763 handled = 1;
29764 }
29765@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29766 MGA_WRITE(MGA_PRIMEND, prim_end);
29767 }
29768
29769- atomic_inc(&dev_priv->last_fence_retired);
29770+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29771 DRM_WAKEUP(&dev_priv->fence_queue);
29772 handled = 1;
29773 }
29774@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29775 * using fences.
29776 */
29777 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29778- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29779+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29780 - *sequence) <= (1 << 23)));
29781
29782 *sequence = cur_fence;
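The mga hunks, like the surrounding i915, r128, via, radeon, hwmon and infiniband ones, convert vblank, fence and statistics counters from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT feature atomic_t increments are overflow-checked to stop reference-count overflows; counters that may legitimately wrap are switched to the *_unchecked variants so they are exempt. The sketch below only illustrates the distinction and is not the PaX implementation; checked_inc() uses the GCC/Clang __builtin_add_overflow() builtin.

#include <limits.h>
#include <stdio.h>

/* Illustration only, not the PaX implementation. */
static int checked_inc(int *v)
{
        int next;

        if (__builtin_add_overflow(*v, 1, &next))
                return -1;              /* refuse to wrap: the REFCOUNT behaviour */
        *v = next;
        return 0;
}

int main(void)
{
        int refcount = INT_MAX;
        unsigned int stat = UINT_MAX;   /* statistics counter, wrapping is harmless */

        printf("checked_inc -> %d, refcount still %d\n",
               checked_inc(&refcount), refcount);
        stat++;                         /* the atomic_*_unchecked() case: wraps to 0 */
        printf("unchecked counter wrapped to %u\n", stat);
        return 0;
}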
29783diff -urNp linux-2.6.32.46/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.46/drivers/gpu/drm/r128/r128_cce.c
29784--- linux-2.6.32.46/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29785+++ linux-2.6.32.46/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29786@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29787
29788 /* GH: Simple idle check.
29789 */
29790- atomic_set(&dev_priv->idle_count, 0);
29791+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29792
29793 /* We don't support anything other than bus-mastering ring mode,
29794 * but the ring can be in either AGP or PCI space for the ring
29795diff -urNp linux-2.6.32.46/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.46/drivers/gpu/drm/r128/r128_drv.h
29796--- linux-2.6.32.46/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29797+++ linux-2.6.32.46/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29798@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29799 int is_pci;
29800 unsigned long cce_buffers_offset;
29801
29802- atomic_t idle_count;
29803+ atomic_unchecked_t idle_count;
29804
29805 int page_flipping;
29806 int current_page;
29807 u32 crtc_offset;
29808 u32 crtc_offset_cntl;
29809
29810- atomic_t vbl_received;
29811+ atomic_unchecked_t vbl_received;
29812
29813 u32 color_fmt;
29814 unsigned int front_offset;
29815diff -urNp linux-2.6.32.46/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.46/drivers/gpu/drm/r128/r128_irq.c
29816--- linux-2.6.32.46/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29817+++ linux-2.6.32.46/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29818@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29819 if (crtc != 0)
29820 return 0;
29821
29822- return atomic_read(&dev_priv->vbl_received);
29823+ return atomic_read_unchecked(&dev_priv->vbl_received);
29824 }
29825
29826 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29827@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29828 /* VBLANK interrupt */
29829 if (status & R128_CRTC_VBLANK_INT) {
29830 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29831- atomic_inc(&dev_priv->vbl_received);
29832+ atomic_inc_unchecked(&dev_priv->vbl_received);
29833 drm_handle_vblank(dev, 0);
29834 return IRQ_HANDLED;
29835 }
29836diff -urNp linux-2.6.32.46/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.46/drivers/gpu/drm/r128/r128_state.c
29837--- linux-2.6.32.46/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29838+++ linux-2.6.32.46/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29839@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29840
29841 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29842 {
29843- if (atomic_read(&dev_priv->idle_count) == 0) {
29844+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29845 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29846 } else {
29847- atomic_set(&dev_priv->idle_count, 0);
29848+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29849 }
29850 }
29851
29852diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/atom.c linux-2.6.32.46/drivers/gpu/drm/radeon/atom.c
29853--- linux-2.6.32.46/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29854+++ linux-2.6.32.46/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29855@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29856 char name[512];
29857 int i;
29858
29859+ pax_track_stack();
29860+
29861 ctx->card = card;
29862 ctx->bios = bios;
29863
29864diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.46/drivers/gpu/drm/radeon/mkregtable.c
29865--- linux-2.6.32.46/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29866+++ linux-2.6.32.46/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29867@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29868 regex_t mask_rex;
29869 regmatch_t match[4];
29870 char buf[1024];
29871- size_t end;
29872+ long end;
29873 int len;
29874 int done = 0;
29875 int r;
29876 unsigned o;
29877 struct offset *offset;
29878 char last_reg_s[10];
29879- int last_reg;
29880+ unsigned long last_reg;
29881
29882 if (regcomp
29883 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29884diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_atombios.c
29885--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29886+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29887@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29888 bool linkb;
29889 struct radeon_i2c_bus_rec ddc_bus;
29890
29891+ pax_track_stack();
29892+
29893 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29894
29895 if (data_offset == 0)
29896@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29897 }
29898 }
29899
29900-struct bios_connector {
29901+static struct bios_connector {
29902 bool valid;
29903 uint16_t line_mux;
29904 uint16_t devices;
29905 int connector_type;
29906 struct radeon_i2c_bus_rec ddc_bus;
29907-};
29908+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29909
29910 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29911 drm_device
29912@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29913 uint8_t dac;
29914 union atom_supported_devices *supported_devices;
29915 int i, j;
29916- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29917
29918 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29919
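Besides the pax_track_stack() markers (STACKLEAK instrumentation for functions with large frames), the radeon_atombios hunk moves the bios_connectors[ATOM_MAX_SUPPORTED_DEVICE] array out of the function's stack frame into a single static object, since an array of that size is a poor fit for the small kernel stack. A toy example of the same transformation follows; the sizes and names are invented, and going static trades reentrancy for stack space.

#include <stdio.h>
#include <string.h>

#define NENTRIES 4096

struct entry {
        int id;
        char name[60];
};

/*
 * Before: "struct entry table[NENTRIES]" sat in the parser's stack frame,
 * roughly 256 KiB here, far beyond a kernel thread's stack budget.
 * After: one static instance, at the cost of reentrancy.
 */
static struct entry table[NENTRIES];

static void parse(void)
{
        memset(table, 0, sizeof(table));
        table[0].id = 1;
        snprintf(table[0].name, sizeof(table[0].name), "connector-%d", table[0].id);
}

int main(void)
{
        parse();
        printf("%s\n", table[0].name);
        return 0;
}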
29920diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_display.c
29921--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29922+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29923@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29924
29925 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29926 error = freq - current_freq;
29927- error = error < 0 ? 0xffffffff : error;
29928+ error = (int32_t)error < 0 ? 0xffffffff : error;
29929 } else
29930 error = abs(current_freq - freq);
29931 vco_diff = abs(vco - best_vco);
29932diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_drv.h
29933--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29934+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29935@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29936
29937 /* SW interrupt */
29938 wait_queue_head_t swi_queue;
29939- atomic_t swi_emitted;
29940+ atomic_unchecked_t swi_emitted;
29941 int vblank_crtc;
29942 uint32_t irq_enable_reg;
29943 uint32_t r500_disp_irq_reg;
29944diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_fence.c
29945--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29946+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29947@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29948 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29949 return 0;
29950 }
29951- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29952+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29953 if (!rdev->cp.ready) {
29954 /* FIXME: cp is not running assume everythings is done right
29955 * away
29956@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29957 return r;
29958 }
29959 WREG32(rdev->fence_drv.scratch_reg, 0);
29960- atomic_set(&rdev->fence_drv.seq, 0);
29961+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29962 INIT_LIST_HEAD(&rdev->fence_drv.created);
29963 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29964 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29965diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.46/drivers/gpu/drm/radeon/radeon.h
29966--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29967+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29968@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29969 */
29970 struct radeon_fence_driver {
29971 uint32_t scratch_reg;
29972- atomic_t seq;
29973+ atomic_unchecked_t seq;
29974 uint32_t last_seq;
29975 unsigned long count_timeout;
29976 wait_queue_head_t queue;
29977@@ -640,7 +640,7 @@ struct radeon_asic {
29978 uint32_t offset, uint32_t obj_size);
29979 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29980 void (*bandwidth_update)(struct radeon_device *rdev);
29981-};
29982+} __no_const;
29983
29984 /*
29985 * Asic structures
29986diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ioc32.c
29987--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29988+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29989@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29990 request = compat_alloc_user_space(sizeof(*request));
29991 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29992 || __put_user(req32.param, &request->param)
29993- || __put_user((void __user *)(unsigned long)req32.value,
29994+ || __put_user((unsigned long)req32.value,
29995 &request->value))
29996 return -EFAULT;
29997
29998diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_irq.c
29999--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
30000+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
30001@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30002 unsigned int ret;
30003 RING_LOCALS;
30004
30005- atomic_inc(&dev_priv->swi_emitted);
30006- ret = atomic_read(&dev_priv->swi_emitted);
30007+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30008+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30009
30010 BEGIN_RING(4);
30011 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30012@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30013 drm_radeon_private_t *dev_priv =
30014 (drm_radeon_private_t *) dev->dev_private;
30015
30016- atomic_set(&dev_priv->swi_emitted, 0);
30017+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30018 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30019
30020 dev->max_vblank_count = 0x001fffff;
30021diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_state.c
30022--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30023+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30024@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30025 {
30026 drm_radeon_private_t *dev_priv = dev->dev_private;
30027 drm_radeon_getparam_t *param = data;
30028- int value;
30029+ int value = 0;
30030
30031 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30032
30033diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ttm.c
30034--- linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30035+++ linux-2.6.32.46/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30036@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30037 DRM_INFO("radeon: ttm finalized\n");
30038 }
30039
30040-static struct vm_operations_struct radeon_ttm_vm_ops;
30041-static const struct vm_operations_struct *ttm_vm_ops = NULL;
30042-
30043-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30044-{
30045- struct ttm_buffer_object *bo;
30046- int r;
30047-
30048- bo = (struct ttm_buffer_object *)vma->vm_private_data;
30049- if (bo == NULL) {
30050- return VM_FAULT_NOPAGE;
30051- }
30052- r = ttm_vm_ops->fault(vma, vmf);
30053- return r;
30054-}
30055-
30056 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30057 {
30058 struct drm_file *file_priv;
30059 struct radeon_device *rdev;
30060- int r;
30061
30062 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30063 return drm_mmap(filp, vma);
30064@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30065
30066 file_priv = (struct drm_file *)filp->private_data;
30067 rdev = file_priv->minor->dev->dev_private;
30068- if (rdev == NULL) {
30069+ if (!rdev)
30070 return -EINVAL;
30071- }
30072- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30073- if (unlikely(r != 0)) {
30074- return r;
30075- }
30076- if (unlikely(ttm_vm_ops == NULL)) {
30077- ttm_vm_ops = vma->vm_ops;
30078- radeon_ttm_vm_ops = *ttm_vm_ops;
30079- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30080- }
30081- vma->vm_ops = &radeon_ttm_vm_ops;
30082- return 0;
30083+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30084 }
30085
30086
30087diff -urNp linux-2.6.32.46/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.46/drivers/gpu/drm/radeon/rs690.c
30088--- linux-2.6.32.46/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30089+++ linux-2.6.32.46/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30090@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30091 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30092 rdev->pm.sideport_bandwidth.full)
30093 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30094- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30095+ read_delay_latency.full = rfixed_const(800 * 1000);
30096 read_delay_latency.full = rfixed_div(read_delay_latency,
30097 rdev->pm.igp_sideport_mclk);
30098+ a.full = rfixed_const(370);
30099+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30100 } else {
30101 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30102 rdev->pm.k8_bandwidth.full)
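The rs690 hunk splits rfixed_const(370 * 800 * 1000) into rfixed_const(800 * 1000) followed by a fixed-point multiply by 370. Assuming the driver's usual 20.12 fixed-point encoding (value << 12 stored in a u32), 370 * 800 * 1000 shifted left by 12 no longer fits in 32 bits, while 800 * 1000 does, so factoring out the 370 avoids truncating the intermediate. The arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assuming rfixed_const(A) stores A << 12 in a u32. */
        uint64_t old_way = (uint64_t)370 * 800 * 1000 << 12;    /* ~1.2e12, overflows u32 */
        uint64_t new_way = (uint64_t)800 * 1000 << 12;          /* ~3.3e9, still fits in u32 */

        printf("370*800*1000 << 12 = %llu (u32 max %lu)\n",
               (unsigned long long)old_way, (unsigned long)UINT32_MAX);
        printf("    800*1000 << 12 = %llu\n", (unsigned long long)new_way);
        return 0;
}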
30103diff -urNp linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo.c
30104--- linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo.c 2011-08-29 22:24:44.000000000 -0400
30105+++ linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo.c 2011-08-29 22:25:07.000000000 -0400
30106@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30107 NULL
30108 };
30109
30110-static struct sysfs_ops ttm_bo_global_ops = {
30111+static const struct sysfs_ops ttm_bo_global_ops = {
30112 .show = &ttm_bo_global_show
30113 };
30114
30115diff -urNp linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo_vm.c
30116--- linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30117+++ linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30118@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30119 {
30120 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30121 vma->vm_private_data;
30122- struct ttm_bo_device *bdev = bo->bdev;
30123+ struct ttm_bo_device *bdev;
30124 unsigned long bus_base;
30125 unsigned long bus_offset;
30126 unsigned long bus_size;
30127@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30128 unsigned long address = (unsigned long)vmf->virtual_address;
30129 int retval = VM_FAULT_NOPAGE;
30130
30131+ if (!bo)
30132+ return VM_FAULT_NOPAGE;
30133+ bdev = bo->bdev;
30134+
30135 /*
30136 * Work around locking order reversal in fault / nopfn
30137 * between mmap_sem and bo_reserve: Perform a trylock operation
30138diff -urNp linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_global.c
30139--- linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30140+++ linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30141@@ -36,7 +36,7 @@
30142 struct ttm_global_item {
30143 struct mutex mutex;
30144 void *object;
30145- int refcount;
30146+ atomic_t refcount;
30147 };
30148
30149 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30150@@ -49,7 +49,7 @@ void ttm_global_init(void)
30151 struct ttm_global_item *item = &glob[i];
30152 mutex_init(&item->mutex);
30153 item->object = NULL;
30154- item->refcount = 0;
30155+ atomic_set(&item->refcount, 0);
30156 }
30157 }
30158
30159@@ -59,7 +59,7 @@ void ttm_global_release(void)
30160 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30161 struct ttm_global_item *item = &glob[i];
30162 BUG_ON(item->object != NULL);
30163- BUG_ON(item->refcount != 0);
30164+ BUG_ON(atomic_read(&item->refcount) != 0);
30165 }
30166 }
30167
30168@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30169 void *object;
30170
30171 mutex_lock(&item->mutex);
30172- if (item->refcount == 0) {
30173+ if (atomic_read(&item->refcount) == 0) {
30174 item->object = kzalloc(ref->size, GFP_KERNEL);
30175 if (unlikely(item->object == NULL)) {
30176 ret = -ENOMEM;
30177@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30178 goto out_err;
30179
30180 }
30181- ++item->refcount;
30182+ atomic_inc(&item->refcount);
30183 ref->object = item->object;
30184 object = item->object;
30185 mutex_unlock(&item->mutex);
30186@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30187 struct ttm_global_item *item = &glob[ref->global_type];
30188
30189 mutex_lock(&item->mutex);
30190- BUG_ON(item->refcount == 0);
30191+ BUG_ON(atomic_read(&item->refcount) == 0);
30192 BUG_ON(ref->object != item->object);
30193- if (--item->refcount == 0) {
30194+ if (atomic_dec_and_test(&item->refcount)) {
30195 ref->release(ref);
30196 item->object = NULL;
30197 }
30198diff -urNp linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_memory.c
30199--- linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30200+++ linux-2.6.32.46/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30201@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30202 NULL
30203 };
30204
30205-static struct sysfs_ops ttm_mem_zone_ops = {
30206+static const struct sysfs_ops ttm_mem_zone_ops = {
30207 .show = &ttm_mem_zone_show,
30208 .store = &ttm_mem_zone_store
30209 };
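The two ttm sysfs_ops hunks above, like the earlier radeon_asic __no_const annotation and the later i2c_algorithm_no_const changes, belong to the constification theme: ops tables whose members are fixed at build time are declared const so they can live in read-only memory, while structures that genuinely need runtime wiring are explicitly marked as the non-const exception. A small C illustration with hypothetical names:

#include <stdio.h>

struct my_ops { int (*show)(void); };

static int show_impl(void) { return 42; }

/* Everything known at build time: the object can be placed in read-only memory. */
static const struct my_ops ro_ops = { .show = show_impl };

/* The *_no_const / __no_const escape hatch: a member really is set at runtime. */
static struct my_ops rw_ops;

int main(void)
{
        rw_ops.show = show_impl;        /* runtime wiring, as in the SDVO/i2c mux cases */
        printf("%d %d\n", ro_ops.show(), rw_ops.show());
        return 0;
}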
30210diff -urNp linux-2.6.32.46/drivers/gpu/drm/via/via_drv.h linux-2.6.32.46/drivers/gpu/drm/via/via_drv.h
30211--- linux-2.6.32.46/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30212+++ linux-2.6.32.46/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30213@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30214 typedef uint32_t maskarray_t[5];
30215
30216 typedef struct drm_via_irq {
30217- atomic_t irq_received;
30218+ atomic_unchecked_t irq_received;
30219 uint32_t pending_mask;
30220 uint32_t enable_mask;
30221 wait_queue_head_t irq_queue;
30222@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30223 struct timeval last_vblank;
30224 int last_vblank_valid;
30225 unsigned usec_per_vblank;
30226- atomic_t vbl_received;
30227+ atomic_unchecked_t vbl_received;
30228 drm_via_state_t hc_state;
30229 char pci_buf[VIA_PCI_BUF_SIZE];
30230 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30231diff -urNp linux-2.6.32.46/drivers/gpu/drm/via/via_irq.c linux-2.6.32.46/drivers/gpu/drm/via/via_irq.c
30232--- linux-2.6.32.46/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30233+++ linux-2.6.32.46/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30234@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30235 if (crtc != 0)
30236 return 0;
30237
30238- return atomic_read(&dev_priv->vbl_received);
30239+ return atomic_read_unchecked(&dev_priv->vbl_received);
30240 }
30241
30242 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30243@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30244
30245 status = VIA_READ(VIA_REG_INTERRUPT);
30246 if (status & VIA_IRQ_VBLANK_PENDING) {
30247- atomic_inc(&dev_priv->vbl_received);
30248- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30249+ atomic_inc_unchecked(&dev_priv->vbl_received);
30250+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30251 do_gettimeofday(&cur_vblank);
30252 if (dev_priv->last_vblank_valid) {
30253 dev_priv->usec_per_vblank =
30254@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30255 dev_priv->last_vblank = cur_vblank;
30256 dev_priv->last_vblank_valid = 1;
30257 }
30258- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30259+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30260 DRM_DEBUG("US per vblank is: %u\n",
30261 dev_priv->usec_per_vblank);
30262 }
30263@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30264
30265 for (i = 0; i < dev_priv->num_irqs; ++i) {
30266 if (status & cur_irq->pending_mask) {
30267- atomic_inc(&cur_irq->irq_received);
30268+ atomic_inc_unchecked(&cur_irq->irq_received);
30269 DRM_WAKEUP(&cur_irq->irq_queue);
30270 handled = 1;
30271 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30272@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30273 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30274 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30275 masks[irq][4]));
30276- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30277+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30278 } else {
30279 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30280 (((cur_irq_sequence =
30281- atomic_read(&cur_irq->irq_received)) -
30282+ atomic_read_unchecked(&cur_irq->irq_received)) -
30283 *sequence) <= (1 << 23)));
30284 }
30285 *sequence = cur_irq_sequence;
30286@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30287 }
30288
30289 for (i = 0; i < dev_priv->num_irqs; ++i) {
30290- atomic_set(&cur_irq->irq_received, 0);
30291+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30292 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30293 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30294 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30295@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30296 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30297 case VIA_IRQ_RELATIVE:
30298 irqwait->request.sequence +=
30299- atomic_read(&cur_irq->irq_received);
30300+ atomic_read_unchecked(&cur_irq->irq_received);
30301 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30302 case VIA_IRQ_ABSOLUTE:
30303 break;
30304diff -urNp linux-2.6.32.46/drivers/hid/hid-core.c linux-2.6.32.46/drivers/hid/hid-core.c
30305--- linux-2.6.32.46/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30306+++ linux-2.6.32.46/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30307@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30308
30309 int hid_add_device(struct hid_device *hdev)
30310 {
30311- static atomic_t id = ATOMIC_INIT(0);
30312+ static atomic_unchecked_t id = ATOMIC_INIT(0);
30313 int ret;
30314
30315 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30316@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30317 /* XXX hack, any other cleaner solution after the driver core
30318 * is converted to allow more than 20 bytes as the device name? */
30319 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30320- hdev->vendor, hdev->product, atomic_inc_return(&id));
30321+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30322
30323 ret = device_add(&hdev->dev);
30324 if (!ret)
30325diff -urNp linux-2.6.32.46/drivers/hid/usbhid/hiddev.c linux-2.6.32.46/drivers/hid/usbhid/hiddev.c
30326--- linux-2.6.32.46/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30327+++ linux-2.6.32.46/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30328@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30329 return put_user(HID_VERSION, (int __user *)arg);
30330
30331 case HIDIOCAPPLICATION:
30332- if (arg < 0 || arg >= hid->maxapplication)
30333+ if (arg >= hid->maxapplication)
30334 return -EINVAL;
30335
30336 for (i = 0; i < hid->maxcollection; i++)
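The hiddev hunk drops the "arg < 0" half of the bounds check because arg is unsigned, so that comparison can never be true and the single upper-bound test is sufficient; the earlier radeon_display hunk fixes the mirror-image problem by casting an unsigned error value to int32_t before testing it for negativity. A two-line demonstration (expect a tautological-comparison warning, which is exactly the point):

#include <stdio.h>

int main(void)
{
        unsigned long arg = (unsigned long)-1;  /* what a "negative" user value becomes */
        unsigned int max = 8;

        printf("arg < 0    -> %d\n", arg < 0);          /* always 0 for an unsigned type */
        printf("arg >= max -> %d\n", arg >= max);       /* 1: the remaining check catches it */
        return 0;
}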
30337diff -urNp linux-2.6.32.46/drivers/hwmon/lis3lv02d.c linux-2.6.32.46/drivers/hwmon/lis3lv02d.c
30338--- linux-2.6.32.46/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30339+++ linux-2.6.32.46/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30340@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30341 * the lid is closed. This leads to interrupts as soon as a little move
30342 * is done.
30343 */
30344- atomic_inc(&lis3_dev.count);
30345+ atomic_inc_unchecked(&lis3_dev.count);
30346
30347 wake_up_interruptible(&lis3_dev.misc_wait);
30348 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30349@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30350 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30351 return -EBUSY; /* already open */
30352
30353- atomic_set(&lis3_dev.count, 0);
30354+ atomic_set_unchecked(&lis3_dev.count, 0);
30355
30356 /*
30357 * The sensor can generate interrupts for free-fall and direction
30358@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30359 add_wait_queue(&lis3_dev.misc_wait, &wait);
30360 while (true) {
30361 set_current_state(TASK_INTERRUPTIBLE);
30362- data = atomic_xchg(&lis3_dev.count, 0);
30363+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30364 if (data)
30365 break;
30366
30367@@ -244,7 +244,7 @@ out:
30368 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30369 {
30370 poll_wait(file, &lis3_dev.misc_wait, wait);
30371- if (atomic_read(&lis3_dev.count))
30372+ if (atomic_read_unchecked(&lis3_dev.count))
30373 return POLLIN | POLLRDNORM;
30374 return 0;
30375 }
30376diff -urNp linux-2.6.32.46/drivers/hwmon/lis3lv02d.h linux-2.6.32.46/drivers/hwmon/lis3lv02d.h
30377--- linux-2.6.32.46/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30378+++ linux-2.6.32.46/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30379@@ -201,7 +201,7 @@ struct lis3lv02d {
30380
30381 struct input_polled_dev *idev; /* input device */
30382 struct platform_device *pdev; /* platform device */
30383- atomic_t count; /* interrupt count after last read */
30384+ atomic_unchecked_t count; /* interrupt count after last read */
30385 int xcalib; /* calibrated null value for x */
30386 int ycalib; /* calibrated null value for y */
30387 int zcalib; /* calibrated null value for z */
30388diff -urNp linux-2.6.32.46/drivers/hwmon/sht15.c linux-2.6.32.46/drivers/hwmon/sht15.c
30389--- linux-2.6.32.46/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30390+++ linux-2.6.32.46/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30391@@ -112,7 +112,7 @@ struct sht15_data {
30392 int supply_uV;
30393 int supply_uV_valid;
30394 struct work_struct update_supply_work;
30395- atomic_t interrupt_handled;
30396+ atomic_unchecked_t interrupt_handled;
30397 };
30398
30399 /**
30400@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30401 return ret;
30402
30403 gpio_direction_input(data->pdata->gpio_data);
30404- atomic_set(&data->interrupt_handled, 0);
30405+ atomic_set_unchecked(&data->interrupt_handled, 0);
30406
30407 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30408 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30409 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30410 /* Only relevant if the interrupt hasn't occured. */
30411- if (!atomic_read(&data->interrupt_handled))
30412+ if (!atomic_read_unchecked(&data->interrupt_handled))
30413 schedule_work(&data->read_work);
30414 }
30415 ret = wait_event_timeout(data->wait_queue,
30416@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30417 struct sht15_data *data = d;
30418 /* First disable the interrupt */
30419 disable_irq_nosync(irq);
30420- atomic_inc(&data->interrupt_handled);
30421+ atomic_inc_unchecked(&data->interrupt_handled);
30422 /* Then schedule a reading work struct */
30423 if (data->flag != SHT15_READING_NOTHING)
30424 schedule_work(&data->read_work);
30425@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30426 here as could have gone low in meantime so verify
30427 it hasn't!
30428 */
30429- atomic_set(&data->interrupt_handled, 0);
30430+ atomic_set_unchecked(&data->interrupt_handled, 0);
30431 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30432 /* If still not occured or another handler has been scheduled */
30433 if (gpio_get_value(data->pdata->gpio_data)
30434- || atomic_read(&data->interrupt_handled))
30435+ || atomic_read_unchecked(&data->interrupt_handled))
30436 return;
30437 }
30438 /* Read the data back from the device */
30439diff -urNp linux-2.6.32.46/drivers/hwmon/w83791d.c linux-2.6.32.46/drivers/hwmon/w83791d.c
30440--- linux-2.6.32.46/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30441+++ linux-2.6.32.46/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30442@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30443 struct i2c_board_info *info);
30444 static int w83791d_remove(struct i2c_client *client);
30445
30446-static int w83791d_read(struct i2c_client *client, u8 register);
30447-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30448+static int w83791d_read(struct i2c_client *client, u8 reg);
30449+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30450 static struct w83791d_data *w83791d_update_device(struct device *dev);
30451
30452 #ifdef DEBUG
30453diff -urNp linux-2.6.32.46/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.46/drivers/i2c/busses/i2c-amd756-s4882.c
30454--- linux-2.6.32.46/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30455+++ linux-2.6.32.46/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-23 21:22:32.000000000 -0400
30456@@ -43,7 +43,7 @@
30457 extern struct i2c_adapter amd756_smbus;
30458
30459 static struct i2c_adapter *s4882_adapter;
30460-static struct i2c_algorithm *s4882_algo;
30461+static i2c_algorithm_no_const *s4882_algo;
30462
30463 /* Wrapper access functions for multiplexed SMBus */
30464 static DEFINE_MUTEX(amd756_lock);
30465diff -urNp linux-2.6.32.46/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.46/drivers/i2c/busses/i2c-nforce2-s4985.c
30466--- linux-2.6.32.46/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30467+++ linux-2.6.32.46/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-23 21:22:32.000000000 -0400
30468@@ -41,7 +41,7 @@
30469 extern struct i2c_adapter *nforce2_smbus;
30470
30471 static struct i2c_adapter *s4985_adapter;
30472-static struct i2c_algorithm *s4985_algo;
30473+static i2c_algorithm_no_const *s4985_algo;
30474
30475 /* Wrapper access functions for multiplexed SMBus */
30476 static DEFINE_MUTEX(nforce2_lock);
30477diff -urNp linux-2.6.32.46/drivers/ide/ide-cd.c linux-2.6.32.46/drivers/ide/ide-cd.c
30478--- linux-2.6.32.46/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30479+++ linux-2.6.32.46/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30480@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30481 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30482 if ((unsigned long)buf & alignment
30483 || blk_rq_bytes(rq) & q->dma_pad_mask
30484- || object_is_on_stack(buf))
30485+ || object_starts_on_stack(buf))
30486 drive->dma = 0;
30487 }
30488 }
30489diff -urNp linux-2.6.32.46/drivers/ide/ide-floppy.c linux-2.6.32.46/drivers/ide/ide-floppy.c
30490--- linux-2.6.32.46/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30491+++ linux-2.6.32.46/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30492@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30493 u8 pc_buf[256], header_len, desc_cnt;
30494 int i, rc = 1, blocks, length;
30495
30496+ pax_track_stack();
30497+
30498 ide_debug_log(IDE_DBG_FUNC, "enter");
30499
30500 drive->bios_cyl = 0;
30501diff -urNp linux-2.6.32.46/drivers/ide/setup-pci.c linux-2.6.32.46/drivers/ide/setup-pci.c
30502--- linux-2.6.32.46/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30503+++ linux-2.6.32.46/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30504@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30505 int ret, i, n_ports = dev2 ? 4 : 2;
30506 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30507
30508+ pax_track_stack();
30509+
30510 for (i = 0; i < n_ports / 2; i++) {
30511 ret = ide_setup_pci_controller(pdev[i], d, !i);
30512 if (ret < 0)
30513diff -urNp linux-2.6.32.46/drivers/ieee1394/dv1394.c linux-2.6.32.46/drivers/ieee1394/dv1394.c
30514--- linux-2.6.32.46/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30515+++ linux-2.6.32.46/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30516@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30517 based upon DIF section and sequence
30518 */
30519
30520-static void inline
30521+static inline void
30522 frame_put_packet (struct frame *f, struct packet *p)
30523 {
30524 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30525diff -urNp linux-2.6.32.46/drivers/ieee1394/hosts.c linux-2.6.32.46/drivers/ieee1394/hosts.c
30526--- linux-2.6.32.46/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30527+++ linux-2.6.32.46/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30528@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30529 }
30530
30531 static struct hpsb_host_driver dummy_driver = {
30532+ .name = "dummy",
30533 .transmit_packet = dummy_transmit_packet,
30534 .devctl = dummy_devctl,
30535 .isoctl = dummy_isoctl
30536diff -urNp linux-2.6.32.46/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.46/drivers/ieee1394/init_ohci1394_dma.c
30537--- linux-2.6.32.46/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30538+++ linux-2.6.32.46/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30539@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30540 for (func = 0; func < 8; func++) {
30541 u32 class = read_pci_config(num,slot,func,
30542 PCI_CLASS_REVISION);
30543- if ((class == 0xffffffff))
30544+ if (class == 0xffffffff)
30545 continue; /* No device at this func */
30546
30547 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30548diff -urNp linux-2.6.32.46/drivers/ieee1394/ohci1394.c linux-2.6.32.46/drivers/ieee1394/ohci1394.c
30549--- linux-2.6.32.46/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30550+++ linux-2.6.32.46/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30551@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30552 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30553
30554 /* Module Parameters */
30555-static int phys_dma = 1;
30556+static int phys_dma;
30557 module_param(phys_dma, int, 0444);
30558-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30559+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30560
30561 static void dma_trm_tasklet(unsigned long data);
30562 static void dma_trm_reset(struct dma_trm_ctx *d);
30563diff -urNp linux-2.6.32.46/drivers/ieee1394/sbp2.c linux-2.6.32.46/drivers/ieee1394/sbp2.c
30564--- linux-2.6.32.46/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30565+++ linux-2.6.32.46/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30566@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30567 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30568 MODULE_LICENSE("GPL");
30569
30570-static int sbp2_module_init(void)
30571+static int __init sbp2_module_init(void)
30572 {
30573 int ret;
30574
30575diff -urNp linux-2.6.32.46/drivers/infiniband/core/cm.c linux-2.6.32.46/drivers/infiniband/core/cm.c
30576--- linux-2.6.32.46/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30577+++ linux-2.6.32.46/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30578@@ -112,7 +112,7 @@ static char const counter_group_names[CM
30579
30580 struct cm_counter_group {
30581 struct kobject obj;
30582- atomic_long_t counter[CM_ATTR_COUNT];
30583+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30584 };
30585
30586 struct cm_counter_attribute {
30587@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30588 struct ib_mad_send_buf *msg = NULL;
30589 int ret;
30590
30591- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30592+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30593 counter[CM_REQ_COUNTER]);
30594
30595 /* Quick state check to discard duplicate REQs. */
30596@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30597 if (!cm_id_priv)
30598 return;
30599
30600- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30601+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30602 counter[CM_REP_COUNTER]);
30603 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30604 if (ret)
30605@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30606 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30607 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30608 spin_unlock_irq(&cm_id_priv->lock);
30609- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30610+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30611 counter[CM_RTU_COUNTER]);
30612 goto out;
30613 }
30614@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30615 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30616 dreq_msg->local_comm_id);
30617 if (!cm_id_priv) {
30618- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30619+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30620 counter[CM_DREQ_COUNTER]);
30621 cm_issue_drep(work->port, work->mad_recv_wc);
30622 return -EINVAL;
30623@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30624 case IB_CM_MRA_REP_RCVD:
30625 break;
30626 case IB_CM_TIMEWAIT:
30627- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30628+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30629 counter[CM_DREQ_COUNTER]);
30630 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30631 goto unlock;
30632@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30633 cm_free_msg(msg);
30634 goto deref;
30635 case IB_CM_DREQ_RCVD:
30636- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30637+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30638 counter[CM_DREQ_COUNTER]);
30639 goto unlock;
30640 default:
30641@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30642 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30643 cm_id_priv->msg, timeout)) {
30644 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30645- atomic_long_inc(&work->port->
30646+ atomic_long_inc_unchecked(&work->port->
30647 counter_group[CM_RECV_DUPLICATES].
30648 counter[CM_MRA_COUNTER]);
30649 goto out;
30650@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30651 break;
30652 case IB_CM_MRA_REQ_RCVD:
30653 case IB_CM_MRA_REP_RCVD:
30654- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30655+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30656 counter[CM_MRA_COUNTER]);
30657 /* fall through */
30658 default:
30659@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30660 case IB_CM_LAP_IDLE:
30661 break;
30662 case IB_CM_MRA_LAP_SENT:
30663- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30664+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30665 counter[CM_LAP_COUNTER]);
30666 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30667 goto unlock;
30668@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30669 cm_free_msg(msg);
30670 goto deref;
30671 case IB_CM_LAP_RCVD:
30672- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30673+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30674 counter[CM_LAP_COUNTER]);
30675 goto unlock;
30676 default:
30677@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30678 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30679 if (cur_cm_id_priv) {
30680 spin_unlock_irq(&cm.lock);
30681- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30682+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30683 counter[CM_SIDR_REQ_COUNTER]);
30684 goto out; /* Duplicate message. */
30685 }
30686@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30687 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30688 msg->retries = 1;
30689
30690- atomic_long_add(1 + msg->retries,
30691+ atomic_long_add_unchecked(1 + msg->retries,
30692 &port->counter_group[CM_XMIT].counter[attr_index]);
30693 if (msg->retries)
30694- atomic_long_add(msg->retries,
30695+ atomic_long_add_unchecked(msg->retries,
30696 &port->counter_group[CM_XMIT_RETRIES].
30697 counter[attr_index]);
30698
30699@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30700 }
30701
30702 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30703- atomic_long_inc(&port->counter_group[CM_RECV].
30704+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30705 counter[attr_id - CM_ATTR_ID_OFFSET]);
30706
30707 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30708@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30709 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30710
30711 return sprintf(buf, "%ld\n",
30712- atomic_long_read(&group->counter[cm_attr->index]));
30713+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30714 }
30715
30716-static struct sysfs_ops cm_counter_ops = {
30717+static const struct sysfs_ops cm_counter_ops = {
30718 .show = cm_show_counter
30719 };
30720
30721diff -urNp linux-2.6.32.46/drivers/infiniband/core/fmr_pool.c linux-2.6.32.46/drivers/infiniband/core/fmr_pool.c
30722--- linux-2.6.32.46/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30723+++ linux-2.6.32.46/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30724@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30725
30726 struct task_struct *thread;
30727
30728- atomic_t req_ser;
30729- atomic_t flush_ser;
30730+ atomic_unchecked_t req_ser;
30731+ atomic_unchecked_t flush_ser;
30732
30733 wait_queue_head_t force_wait;
30734 };
30735@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30736 struct ib_fmr_pool *pool = pool_ptr;
30737
30738 do {
30739- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30740+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30741 ib_fmr_batch_release(pool);
30742
30743- atomic_inc(&pool->flush_ser);
30744+ atomic_inc_unchecked(&pool->flush_ser);
30745 wake_up_interruptible(&pool->force_wait);
30746
30747 if (pool->flush_function)
30748@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30749 }
30750
30751 set_current_state(TASK_INTERRUPTIBLE);
30752- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30753+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30754 !kthread_should_stop())
30755 schedule();
30756 __set_current_state(TASK_RUNNING);
30757@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30758 pool->dirty_watermark = params->dirty_watermark;
30759 pool->dirty_len = 0;
30760 spin_lock_init(&pool->pool_lock);
30761- atomic_set(&pool->req_ser, 0);
30762- atomic_set(&pool->flush_ser, 0);
30763+ atomic_set_unchecked(&pool->req_ser, 0);
30764+ atomic_set_unchecked(&pool->flush_ser, 0);
30765 init_waitqueue_head(&pool->force_wait);
30766
30767 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30768@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30769 }
30770 spin_unlock_irq(&pool->pool_lock);
30771
30772- serial = atomic_inc_return(&pool->req_ser);
30773+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30774 wake_up_process(pool->thread);
30775
30776 if (wait_event_interruptible(pool->force_wait,
30777- atomic_read(&pool->flush_ser) - serial >= 0))
30778+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30779 return -EINTR;
30780
30781 return 0;
30782@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30783 } else {
30784 list_add_tail(&fmr->list, &pool->dirty_list);
30785 if (++pool->dirty_len >= pool->dirty_watermark) {
30786- atomic_inc(&pool->req_ser);
30787+ atomic_inc_unchecked(&pool->req_ser);
30788 wake_up_process(pool->thread);
30789 }
30790 }
30791diff -urNp linux-2.6.32.46/drivers/infiniband/core/sysfs.c linux-2.6.32.46/drivers/infiniband/core/sysfs.c
30792--- linux-2.6.32.46/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30793+++ linux-2.6.32.46/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30794@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30795 return port_attr->show(p, port_attr, buf);
30796 }
30797
30798-static struct sysfs_ops port_sysfs_ops = {
30799+static const struct sysfs_ops port_sysfs_ops = {
30800 .show = port_attr_show
30801 };
30802
30803diff -urNp linux-2.6.32.46/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.46/drivers/infiniband/core/uverbs_marshall.c
30804--- linux-2.6.32.46/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30805+++ linux-2.6.32.46/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30806@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30807 dst->grh.sgid_index = src->grh.sgid_index;
30808 dst->grh.hop_limit = src->grh.hop_limit;
30809 dst->grh.traffic_class = src->grh.traffic_class;
30810+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30811 dst->dlid = src->dlid;
30812 dst->sl = src->sl;
30813 dst->src_path_bits = src->src_path_bits;
30814 dst->static_rate = src->static_rate;
30815 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30816 dst->port_num = src->port_num;
30817+ dst->reserved = 0;
30818 }
30819 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30820
30821 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30822 struct ib_qp_attr *src)
30823 {
30824+ dst->qp_state = src->qp_state;
30825 dst->cur_qp_state = src->cur_qp_state;
30826 dst->path_mtu = src->path_mtu;
30827 dst->path_mig_state = src->path_mig_state;
30828@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30829 dst->rnr_retry = src->rnr_retry;
30830 dst->alt_port_num = src->alt_port_num;
30831 dst->alt_timeout = src->alt_timeout;
30832+ memset(dst->reserved, 0, sizeof(dst->reserved));
30833 }
30834 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30835
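The uverbs_marshall hunk zeroes the reserved fields (and copies qp_state) before the ah/qp attribute structures are handed to userspace; without that, whatever happened to be in those bytes of kernel memory would leak to the caller. A minimal sketch of the pattern, with an invented struct:

#include <stdio.h>
#include <string.h>

struct reply {
        unsigned char   flags;          /* 3 bytes of padding follow on most ABIs */
        unsigned int    value;
        unsigned int    reserved;
};

int main(void)
{
        struct reply r;                 /* stack garbage until something clears it */

        memset(&r, 0, sizeof(r));       /* what the added memset()s/zero assignments guarantee */
        r.flags = 1;
        r.value = 42;
        /* Only now is copying the whole struct to an untrusted reader safe. */
        printf("reserved = %u\n", r.reserved);
        return 0;
}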
30836diff -urNp linux-2.6.32.46/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.46/drivers/infiniband/hw/ipath/ipath_fs.c
30837--- linux-2.6.32.46/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30838+++ linux-2.6.32.46/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30839@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30840 struct infinipath_counters counters;
30841 struct ipath_devdata *dd;
30842
30843+ pax_track_stack();
30844+
30845 dd = file->f_path.dentry->d_inode->i_private;
30846 dd->ipath_f_read_counters(dd, &counters);
30847
30848diff -urNp linux-2.6.32.46/drivers/infiniband/hw/nes/nes.c linux-2.6.32.46/drivers/infiniband/hw/nes/nes.c
30849--- linux-2.6.32.46/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30850+++ linux-2.6.32.46/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30851@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30852 LIST_HEAD(nes_adapter_list);
30853 static LIST_HEAD(nes_dev_list);
30854
30855-atomic_t qps_destroyed;
30856+atomic_unchecked_t qps_destroyed;
30857
30858 static unsigned int ee_flsh_adapter;
30859 static unsigned int sysfs_nonidx_addr;
30860@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30861 struct nes_adapter *nesadapter = nesdev->nesadapter;
30862 u32 qp_id;
30863
30864- atomic_inc(&qps_destroyed);
30865+ atomic_inc_unchecked(&qps_destroyed);
30866
30867 /* Free the control structures */
30868
30869diff -urNp linux-2.6.32.46/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.46/drivers/infiniband/hw/nes/nes_cm.c
30870--- linux-2.6.32.46/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30871+++ linux-2.6.32.46/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30872@@ -69,11 +69,11 @@ u32 cm_packets_received;
30873 u32 cm_listens_created;
30874 u32 cm_listens_destroyed;
30875 u32 cm_backlog_drops;
30876-atomic_t cm_loopbacks;
30877-atomic_t cm_nodes_created;
30878-atomic_t cm_nodes_destroyed;
30879-atomic_t cm_accel_dropped_pkts;
30880-atomic_t cm_resets_recvd;
30881+atomic_unchecked_t cm_loopbacks;
30882+atomic_unchecked_t cm_nodes_created;
30883+atomic_unchecked_t cm_nodes_destroyed;
30884+atomic_unchecked_t cm_accel_dropped_pkts;
30885+atomic_unchecked_t cm_resets_recvd;
30886
30887 static inline int mini_cm_accelerated(struct nes_cm_core *,
30888 struct nes_cm_node *);
30889@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30890
30891 static struct nes_cm_core *g_cm_core;
30892
30893-atomic_t cm_connects;
30894-atomic_t cm_accepts;
30895-atomic_t cm_disconnects;
30896-atomic_t cm_closes;
30897-atomic_t cm_connecteds;
30898-atomic_t cm_connect_reqs;
30899-atomic_t cm_rejects;
30900+atomic_unchecked_t cm_connects;
30901+atomic_unchecked_t cm_accepts;
30902+atomic_unchecked_t cm_disconnects;
30903+atomic_unchecked_t cm_closes;
30904+atomic_unchecked_t cm_connecteds;
30905+atomic_unchecked_t cm_connect_reqs;
30906+atomic_unchecked_t cm_rejects;
30907
30908
30909 /**
30910@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30911 cm_node->rem_mac);
30912
30913 add_hte_node(cm_core, cm_node);
30914- atomic_inc(&cm_nodes_created);
30915+ atomic_inc_unchecked(&cm_nodes_created);
30916
30917 return cm_node;
30918 }
30919@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30920 }
30921
30922 atomic_dec(&cm_core->node_cnt);
30923- atomic_inc(&cm_nodes_destroyed);
30924+ atomic_inc_unchecked(&cm_nodes_destroyed);
30925 nesqp = cm_node->nesqp;
30926 if (nesqp) {
30927 nesqp->cm_node = NULL;
30928@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30929
30930 static void drop_packet(struct sk_buff *skb)
30931 {
30932- atomic_inc(&cm_accel_dropped_pkts);
30933+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30934 dev_kfree_skb_any(skb);
30935 }
30936
30937@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30938
30939 int reset = 0; /* whether to send reset in case of err.. */
30940 int passive_state;
30941- atomic_inc(&cm_resets_recvd);
30942+ atomic_inc_unchecked(&cm_resets_recvd);
30943 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30944 " refcnt=%d\n", cm_node, cm_node->state,
30945 atomic_read(&cm_node->ref_count));
30946@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30947 rem_ref_cm_node(cm_node->cm_core, cm_node);
30948 return NULL;
30949 }
30950- atomic_inc(&cm_loopbacks);
30951+ atomic_inc_unchecked(&cm_loopbacks);
30952 loopbackremotenode->loopbackpartner = cm_node;
30953 loopbackremotenode->tcp_cntxt.rcv_wscale =
30954 NES_CM_DEFAULT_RCV_WND_SCALE;
30955@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30956 add_ref_cm_node(cm_node);
30957 } else if (cm_node->state == NES_CM_STATE_TSA) {
30958 rem_ref_cm_node(cm_core, cm_node);
30959- atomic_inc(&cm_accel_dropped_pkts);
30960+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30961 dev_kfree_skb_any(skb);
30962 break;
30963 }
30964@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30965
30966 if ((cm_id) && (cm_id->event_handler)) {
30967 if (issue_disconn) {
30968- atomic_inc(&cm_disconnects);
30969+ atomic_inc_unchecked(&cm_disconnects);
30970 cm_event.event = IW_CM_EVENT_DISCONNECT;
30971 cm_event.status = disconn_status;
30972 cm_event.local_addr = cm_id->local_addr;
30973@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30974 }
30975
30976 if (issue_close) {
30977- atomic_inc(&cm_closes);
30978+ atomic_inc_unchecked(&cm_closes);
30979 nes_disconnect(nesqp, 1);
30980
30981 cm_id->provider_data = nesqp;
30982@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30983
30984 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30985 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30986- atomic_inc(&cm_accepts);
30987+ atomic_inc_unchecked(&cm_accepts);
30988
30989 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30990 atomic_read(&nesvnic->netdev->refcnt));
30991@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30992
30993 struct nes_cm_core *cm_core;
30994
30995- atomic_inc(&cm_rejects);
30996+ atomic_inc_unchecked(&cm_rejects);
30997 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30998 loopback = cm_node->loopbackpartner;
30999 cm_core = cm_node->cm_core;
31000@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31001 ntohl(cm_id->local_addr.sin_addr.s_addr),
31002 ntohs(cm_id->local_addr.sin_port));
31003
31004- atomic_inc(&cm_connects);
31005+ atomic_inc_unchecked(&cm_connects);
31006 nesqp->active_conn = 1;
31007
31008 /* cache the cm_id in the qp */
31009@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31010 if (nesqp->destroyed) {
31011 return;
31012 }
31013- atomic_inc(&cm_connecteds);
31014+ atomic_inc_unchecked(&cm_connecteds);
31015 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31016 " local port 0x%04X. jiffies = %lu.\n",
31017 nesqp->hwqp.qp_id,
31018@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
31019
31020 ret = cm_id->event_handler(cm_id, &cm_event);
31021 cm_id->add_ref(cm_id);
31022- atomic_inc(&cm_closes);
31023+ atomic_inc_unchecked(&cm_closes);
31024 cm_event.event = IW_CM_EVENT_CLOSE;
31025 cm_event.status = IW_CM_EVENT_STATUS_OK;
31026 cm_event.provider_data = cm_id->provider_data;
31027@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31028 return;
31029 cm_id = cm_node->cm_id;
31030
31031- atomic_inc(&cm_connect_reqs);
31032+ atomic_inc_unchecked(&cm_connect_reqs);
31033 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31034 cm_node, cm_id, jiffies);
31035
31036@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31037 return;
31038 cm_id = cm_node->cm_id;
31039
31040- atomic_inc(&cm_connect_reqs);
31041+ atomic_inc_unchecked(&cm_connect_reqs);
31042 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31043 cm_node, cm_id, jiffies);
31044
31045diff -urNp linux-2.6.32.46/drivers/infiniband/hw/nes/nes.h linux-2.6.32.46/drivers/infiniband/hw/nes/nes.h
31046--- linux-2.6.32.46/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31047+++ linux-2.6.32.46/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31048@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31049 extern unsigned int wqm_quanta;
31050 extern struct list_head nes_adapter_list;
31051
31052-extern atomic_t cm_connects;
31053-extern atomic_t cm_accepts;
31054-extern atomic_t cm_disconnects;
31055-extern atomic_t cm_closes;
31056-extern atomic_t cm_connecteds;
31057-extern atomic_t cm_connect_reqs;
31058-extern atomic_t cm_rejects;
31059-extern atomic_t mod_qp_timouts;
31060-extern atomic_t qps_created;
31061-extern atomic_t qps_destroyed;
31062-extern atomic_t sw_qps_destroyed;
31063+extern atomic_unchecked_t cm_connects;
31064+extern atomic_unchecked_t cm_accepts;
31065+extern atomic_unchecked_t cm_disconnects;
31066+extern atomic_unchecked_t cm_closes;
31067+extern atomic_unchecked_t cm_connecteds;
31068+extern atomic_unchecked_t cm_connect_reqs;
31069+extern atomic_unchecked_t cm_rejects;
31070+extern atomic_unchecked_t mod_qp_timouts;
31071+extern atomic_unchecked_t qps_created;
31072+extern atomic_unchecked_t qps_destroyed;
31073+extern atomic_unchecked_t sw_qps_destroyed;
31074 extern u32 mh_detected;
31075 extern u32 mh_pauses_sent;
31076 extern u32 cm_packets_sent;
31077@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31078 extern u32 cm_listens_created;
31079 extern u32 cm_listens_destroyed;
31080 extern u32 cm_backlog_drops;
31081-extern atomic_t cm_loopbacks;
31082-extern atomic_t cm_nodes_created;
31083-extern atomic_t cm_nodes_destroyed;
31084-extern atomic_t cm_accel_dropped_pkts;
31085-extern atomic_t cm_resets_recvd;
31086+extern atomic_unchecked_t cm_loopbacks;
31087+extern atomic_unchecked_t cm_nodes_created;
31088+extern atomic_unchecked_t cm_nodes_destroyed;
31089+extern atomic_unchecked_t cm_accel_dropped_pkts;
31090+extern atomic_unchecked_t cm_resets_recvd;
31091
31092 extern u32 int_mod_timer_init;
31093 extern u32 int_mod_cq_depth_256;
31094diff -urNp linux-2.6.32.46/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.46/drivers/infiniband/hw/nes/nes_nic.c
31095--- linux-2.6.32.46/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31096+++ linux-2.6.32.46/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31097@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31098 target_stat_values[++index] = mh_detected;
31099 target_stat_values[++index] = mh_pauses_sent;
31100 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31101- target_stat_values[++index] = atomic_read(&cm_connects);
31102- target_stat_values[++index] = atomic_read(&cm_accepts);
31103- target_stat_values[++index] = atomic_read(&cm_disconnects);
31104- target_stat_values[++index] = atomic_read(&cm_connecteds);
31105- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31106- target_stat_values[++index] = atomic_read(&cm_rejects);
31107- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31108- target_stat_values[++index] = atomic_read(&qps_created);
31109- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31110- target_stat_values[++index] = atomic_read(&qps_destroyed);
31111- target_stat_values[++index] = atomic_read(&cm_closes);
31112+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31113+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31114+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31115+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31116+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31117+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31118+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31119+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31120+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31121+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31122+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31123 target_stat_values[++index] = cm_packets_sent;
31124 target_stat_values[++index] = cm_packets_bounced;
31125 target_stat_values[++index] = cm_packets_created;
31126@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31127 target_stat_values[++index] = cm_listens_created;
31128 target_stat_values[++index] = cm_listens_destroyed;
31129 target_stat_values[++index] = cm_backlog_drops;
31130- target_stat_values[++index] = atomic_read(&cm_loopbacks);
31131- target_stat_values[++index] = atomic_read(&cm_nodes_created);
31132- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31133- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31134- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31135+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31136+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31137+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31138+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31139+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31140 target_stat_values[++index] = int_mod_timer_init;
31141 target_stat_values[++index] = int_mod_cq_depth_1;
31142 target_stat_values[++index] = int_mod_cq_depth_4;
31143diff -urNp linux-2.6.32.46/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.46/drivers/infiniband/hw/nes/nes_verbs.c
31144--- linux-2.6.32.46/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31145+++ linux-2.6.32.46/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31146@@ -45,9 +45,9 @@
31147
31148 #include <rdma/ib_umem.h>
31149
31150-atomic_t mod_qp_timouts;
31151-atomic_t qps_created;
31152-atomic_t sw_qps_destroyed;
31153+atomic_unchecked_t mod_qp_timouts;
31154+atomic_unchecked_t qps_created;
31155+atomic_unchecked_t sw_qps_destroyed;
31156
31157 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31158
31159@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31160 if (init_attr->create_flags)
31161 return ERR_PTR(-EINVAL);
31162
31163- atomic_inc(&qps_created);
31164+ atomic_inc_unchecked(&qps_created);
31165 switch (init_attr->qp_type) {
31166 case IB_QPT_RC:
31167 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31168@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31169 struct iw_cm_event cm_event;
31170 int ret;
31171
31172- atomic_inc(&sw_qps_destroyed);
31173+ atomic_inc_unchecked(&sw_qps_destroyed);
31174 nesqp->destroyed = 1;
31175
31176 /* Blow away the connection if it exists. */
31177diff -urNp linux-2.6.32.46/drivers/input/gameport/gameport.c linux-2.6.32.46/drivers/input/gameport/gameport.c
31178--- linux-2.6.32.46/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31179+++ linux-2.6.32.46/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31180@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31181 */
31182 static void gameport_init_port(struct gameport *gameport)
31183 {
31184- static atomic_t gameport_no = ATOMIC_INIT(0);
31185+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31186
31187 __module_get(THIS_MODULE);
31188
31189 mutex_init(&gameport->drv_mutex);
31190 device_initialize(&gameport->dev);
31191- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31192+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31193 gameport->dev.bus = &gameport_bus;
31194 gameport->dev.release = gameport_release_port;
31195 if (gameport->parent)
31196diff -urNp linux-2.6.32.46/drivers/input/input.c linux-2.6.32.46/drivers/input/input.c
31197--- linux-2.6.32.46/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31198+++ linux-2.6.32.46/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31199@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31200 */
31201 int input_register_device(struct input_dev *dev)
31202 {
31203- static atomic_t input_no = ATOMIC_INIT(0);
31204+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31205 struct input_handler *handler;
31206 const char *path;
31207 int error;
31208@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31209 dev->setkeycode = input_default_setkeycode;
31210
31211 dev_set_name(&dev->dev, "input%ld",
31212- (unsigned long) atomic_inc_return(&input_no) - 1);
31213+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31214
31215 error = device_add(&dev->dev);
31216 if (error)
31217diff -urNp linux-2.6.32.46/drivers/input/joystick/sidewinder.c linux-2.6.32.46/drivers/input/joystick/sidewinder.c
31218--- linux-2.6.32.46/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31219+++ linux-2.6.32.46/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31220@@ -30,6 +30,7 @@
31221 #include <linux/kernel.h>
31222 #include <linux/module.h>
31223 #include <linux/slab.h>
31224+#include <linux/sched.h>
31225 #include <linux/init.h>
31226 #include <linux/input.h>
31227 #include <linux/gameport.h>
31228@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31229 unsigned char buf[SW_LENGTH];
31230 int i;
31231
31232+ pax_track_stack();
31233+
31234 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31235
31236 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31237diff -urNp linux-2.6.32.46/drivers/input/joystick/xpad.c linux-2.6.32.46/drivers/input/joystick/xpad.c
31238--- linux-2.6.32.46/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31239+++ linux-2.6.32.46/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31240@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31241
31242 static int xpad_led_probe(struct usb_xpad *xpad)
31243 {
31244- static atomic_t led_seq = ATOMIC_INIT(0);
31245+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31246 long led_no;
31247 struct xpad_led *led;
31248 struct led_classdev *led_cdev;
31249@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31250 if (!led)
31251 return -ENOMEM;
31252
31253- led_no = (long)atomic_inc_return(&led_seq) - 1;
31254+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31255
31256 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31257 led->xpad = xpad;
31258diff -urNp linux-2.6.32.46/drivers/input/serio/serio.c linux-2.6.32.46/drivers/input/serio/serio.c
31259--- linux-2.6.32.46/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31260+++ linux-2.6.32.46/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31261@@ -527,7 +527,7 @@ static void serio_release_port(struct de
31262 */
31263 static void serio_init_port(struct serio *serio)
31264 {
31265- static atomic_t serio_no = ATOMIC_INIT(0);
31266+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31267
31268 __module_get(THIS_MODULE);
31269
31270@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31271 mutex_init(&serio->drv_mutex);
31272 device_initialize(&serio->dev);
31273 dev_set_name(&serio->dev, "serio%ld",
31274- (long)atomic_inc_return(&serio_no) - 1);
31275+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
31276 serio->dev.bus = &serio_bus;
31277 serio->dev.release = serio_release_port;
31278 if (serio->parent) {
31279diff -urNp linux-2.6.32.46/drivers/isdn/gigaset/common.c linux-2.6.32.46/drivers/isdn/gigaset/common.c
31280--- linux-2.6.32.46/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31281+++ linux-2.6.32.46/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31282@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31283 cs->commands_pending = 0;
31284 cs->cur_at_seq = 0;
31285 cs->gotfwver = -1;
31286- cs->open_count = 0;
31287+ local_set(&cs->open_count, 0);
31288 cs->dev = NULL;
31289 cs->tty = NULL;
31290 cs->tty_dev = NULL;
31291diff -urNp linux-2.6.32.46/drivers/isdn/gigaset/gigaset.h linux-2.6.32.46/drivers/isdn/gigaset/gigaset.h
31292--- linux-2.6.32.46/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31293+++ linux-2.6.32.46/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31294@@ -34,6 +34,7 @@
31295 #include <linux/tty_driver.h>
31296 #include <linux/list.h>
31297 #include <asm/atomic.h>
31298+#include <asm/local.h>
31299
31300 #define GIG_VERSION {0,5,0,0}
31301 #define GIG_COMPAT {0,4,0,0}
31302@@ -446,7 +447,7 @@ struct cardstate {
31303 spinlock_t cmdlock;
31304 unsigned curlen, cmdbytes;
31305
31306- unsigned open_count;
31307+ local_t open_count;
31308 struct tty_struct *tty;
31309 struct tasklet_struct if_wake_tasklet;
31310 unsigned control_state;
31311diff -urNp linux-2.6.32.46/drivers/isdn/gigaset/interface.c linux-2.6.32.46/drivers/isdn/gigaset/interface.c
31312--- linux-2.6.32.46/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31313+++ linux-2.6.32.46/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31314@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31315 return -ERESTARTSYS; // FIXME -EINTR?
31316 tty->driver_data = cs;
31317
31318- ++cs->open_count;
31319-
31320- if (cs->open_count == 1) {
31321+ if (local_inc_return(&cs->open_count) == 1) {
31322 spin_lock_irqsave(&cs->lock, flags);
31323 cs->tty = tty;
31324 spin_unlock_irqrestore(&cs->lock, flags);
31325@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31326
31327 if (!cs->connected)
31328 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31329- else if (!cs->open_count)
31330+ else if (!local_read(&cs->open_count))
31331 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31332 else {
31333- if (!--cs->open_count) {
31334+ if (!local_dec_return(&cs->open_count)) {
31335 spin_lock_irqsave(&cs->lock, flags);
31336 cs->tty = NULL;
31337 spin_unlock_irqrestore(&cs->lock, flags);
31338@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31339 if (!cs->connected) {
31340 gig_dbg(DEBUG_IF, "not connected");
31341 retval = -ENODEV;
31342- } else if (!cs->open_count)
31343+ } else if (!local_read(&cs->open_count))
31344 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31345 else {
31346 retval = 0;
31347@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31348 if (!cs->connected) {
31349 gig_dbg(DEBUG_IF, "not connected");
31350 retval = -ENODEV;
31351- } else if (!cs->open_count)
31352+ } else if (!local_read(&cs->open_count))
31353 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31354 else if (cs->mstate != MS_LOCKED) {
31355 dev_warn(cs->dev, "can't write to unlocked device\n");
31356@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31357 if (!cs->connected) {
31358 gig_dbg(DEBUG_IF, "not connected");
31359 retval = -ENODEV;
31360- } else if (!cs->open_count)
31361+ } else if (!local_read(&cs->open_count))
31362 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31363 else if (cs->mstate != MS_LOCKED) {
31364 dev_warn(cs->dev, "can't write to unlocked device\n");
31365@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31366
31367 if (!cs->connected)
31368 gig_dbg(DEBUG_IF, "not connected");
31369- else if (!cs->open_count)
31370+ else if (!local_read(&cs->open_count))
31371 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31372 else if (cs->mstate != MS_LOCKED)
31373 dev_warn(cs->dev, "can't write to unlocked device\n");
31374@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31375
31376 if (!cs->connected)
31377 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31378- else if (!cs->open_count)
31379+ else if (!local_read(&cs->open_count))
31380 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31381 else {
31382 //FIXME
31383@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31384
31385 if (!cs->connected)
31386 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31387- else if (!cs->open_count)
31388+ else if (!local_read(&cs->open_count))
31389 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31390 else {
31391 //FIXME
31392@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31393 goto out;
31394 }
31395
31396- if (!cs->open_count) {
31397+ if (!local_read(&cs->open_count)) {
31398 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31399 goto out;
31400 }
31401diff -urNp linux-2.6.32.46/drivers/isdn/hardware/avm/b1.c linux-2.6.32.46/drivers/isdn/hardware/avm/b1.c
31402--- linux-2.6.32.46/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31403+++ linux-2.6.32.46/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31404@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31405 }
31406 if (left) {
31407 if (t4file->user) {
31408- if (copy_from_user(buf, dp, left))
31409+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31410 return -EFAULT;
31411 } else {
31412 memcpy(buf, dp, left);
31413@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31414 }
31415 if (left) {
31416 if (config->user) {
31417- if (copy_from_user(buf, dp, left))
31418+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31419 return -EFAULT;
31420 } else {
31421 memcpy(buf, dp, left);
31422diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.46/drivers/isdn/hardware/eicon/capidtmf.c
31423--- linux-2.6.32.46/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31424+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31425@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31426 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31427 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31428
31429+ pax_track_stack();
31430
31431 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31432 {
31433diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.46/drivers/isdn/hardware/eicon/capifunc.c
31434--- linux-2.6.32.46/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31435+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31436@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31437 IDI_SYNC_REQ req;
31438 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31439
31440+ pax_track_stack();
31441+
31442 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31443
31444 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31445diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.46/drivers/isdn/hardware/eicon/diddfunc.c
31446--- linux-2.6.32.46/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31447+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31448@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31449 IDI_SYNC_REQ req;
31450 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31451
31452+ pax_track_stack();
31453+
31454 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31455
31456 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31457diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.46/drivers/isdn/hardware/eicon/divasfunc.c
31458--- linux-2.6.32.46/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31459+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31460@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31461 IDI_SYNC_REQ req;
31462 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31463
31464+ pax_track_stack();
31465+
31466 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31467
31468 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31469diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.46/drivers/isdn/hardware/eicon/divasync.h
31470--- linux-2.6.32.46/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31471+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31472@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31473 } diva_didd_add_adapter_t;
31474 typedef struct _diva_didd_remove_adapter {
31475 IDI_CALL p_request;
31476-} diva_didd_remove_adapter_t;
31477+} __no_const diva_didd_remove_adapter_t;
31478 typedef struct _diva_didd_read_adapter_array {
31479 void * buffer;
31480 dword length;
31481diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.46/drivers/isdn/hardware/eicon/idifunc.c
31482--- linux-2.6.32.46/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31483+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31484@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31485 IDI_SYNC_REQ req;
31486 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31487
31488+ pax_track_stack();
31489+
31490 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31491
31492 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31493diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/message.c linux-2.6.32.46/drivers/isdn/hardware/eicon/message.c
31494--- linux-2.6.32.46/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31495+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31496@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31497 dword d;
31498 word w;
31499
31500+ pax_track_stack();
31501+
31502 a = plci->adapter;
31503 Id = ((word)plci->Id<<8)|a->Id;
31504 PUT_WORD(&SS_Ind[4],0x0000);
31505@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31506 word j, n, w;
31507 dword d;
31508
31509+ pax_track_stack();
31510+
31511
31512 for(i=0;i<8;i++) bp_parms[i].length = 0;
31513 for(i=0;i<2;i++) global_config[i].length = 0;
31514@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31515 const byte llc3[] = {4,3,2,2,6,6,0};
31516 const byte header[] = {0,2,3,3,0,0,0};
31517
31518+ pax_track_stack();
31519+
31520 for(i=0;i<8;i++) bp_parms[i].length = 0;
31521 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31522 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31523@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31524 word appl_number_group_type[MAX_APPL];
31525 PLCI *auxplci;
31526
31527+ pax_track_stack();
31528+
31529 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31530
31531 if(!a->group_optimization_enabled)
31532diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.46/drivers/isdn/hardware/eicon/mntfunc.c
31533--- linux-2.6.32.46/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31534+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31535@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31536 IDI_SYNC_REQ req;
31537 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31538
31539+ pax_track_stack();
31540+
31541 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31542
31543 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31544diff -urNp linux-2.6.32.46/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.46/drivers/isdn/hardware/eicon/xdi_adapter.h
31545--- linux-2.6.32.46/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31546+++ linux-2.6.32.46/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31547@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31548 typedef struct _diva_os_idi_adapter_interface {
31549 diva_init_card_proc_t cleanup_adapter_proc;
31550 diva_cmd_card_proc_t cmd_proc;
31551-} diva_os_idi_adapter_interface_t;
31552+} __no_const diva_os_idi_adapter_interface_t;
31553
31554 typedef struct _diva_os_xdi_adapter {
31555 struct list_head link;
31556diff -urNp linux-2.6.32.46/drivers/isdn/i4l/isdn_common.c linux-2.6.32.46/drivers/isdn/i4l/isdn_common.c
31557--- linux-2.6.32.46/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31558+++ linux-2.6.32.46/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31559@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31560 } iocpar;
31561 void __user *argp = (void __user *)arg;
31562
31563+ pax_track_stack();
31564+
31565 #define name iocpar.name
31566 #define bname iocpar.bname
31567 #define iocts iocpar.iocts
31568diff -urNp linux-2.6.32.46/drivers/isdn/icn/icn.c linux-2.6.32.46/drivers/isdn/icn/icn.c
31569--- linux-2.6.32.46/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31570+++ linux-2.6.32.46/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31571@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31572 if (count > len)
31573 count = len;
31574 if (user) {
31575- if (copy_from_user(msg, buf, count))
31576+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31577 return -EFAULT;
31578 } else
31579 memcpy(msg, buf, count);
31580diff -urNp linux-2.6.32.46/drivers/isdn/mISDN/socket.c linux-2.6.32.46/drivers/isdn/mISDN/socket.c
31581--- linux-2.6.32.46/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31582+++ linux-2.6.32.46/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31583@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31584 if (dev) {
31585 struct mISDN_devinfo di;
31586
31587+ memset(&di, 0, sizeof(di));
31588 di.id = dev->id;
31589 di.Dprotocols = dev->Dprotocols;
31590 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31591@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31592 if (dev) {
31593 struct mISDN_devinfo di;
31594
31595+ memset(&di, 0, sizeof(di));
31596 di.id = dev->id;
31597 di.Dprotocols = dev->Dprotocols;
31598 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31599diff -urNp linux-2.6.32.46/drivers/isdn/sc/interrupt.c linux-2.6.32.46/drivers/isdn/sc/interrupt.c
31600--- linux-2.6.32.46/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31601+++ linux-2.6.32.46/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31602@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31603 }
31604 else if(callid>=0x0000 && callid<=0x7FFF)
31605 {
31606+ int len;
31607+
31608 pr_debug("%s: Got Incoming Call\n",
31609 sc_adapter[card]->devicename);
31610- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31611- strcpy(setup.eazmsn,
31612- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31613+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31614+ sizeof(setup.phone));
31615+ if (len >= sizeof(setup.phone))
31616+ continue;
31617+ len = strlcpy(setup.eazmsn,
31618+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31619+ sizeof(setup.eazmsn));
31620+ if (len >= sizeof(setup.eazmsn))
31621+ continue;
31622 setup.si1 = 7;
31623 setup.si2 = 0;
31624 setup.plan = 0;
31625@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31626 * Handle a GetMyNumber Rsp
31627 */
31628 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31629- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31630+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31631+ rcvmsg.msg_data.byte_array,
31632+ sizeof(rcvmsg.msg_data.byte_array));
31633 continue;
31634 }
31635
31636diff -urNp linux-2.6.32.46/drivers/lguest/core.c linux-2.6.32.46/drivers/lguest/core.c
31637--- linux-2.6.32.46/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31638+++ linux-2.6.32.46/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31639@@ -91,9 +91,17 @@ static __init int map_switcher(void)
31640 * it's worked so far. The end address needs +1 because __get_vm_area
31641 * allocates an extra guard page, so we need space for that.
31642 */
31643+
31644+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31645+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31646+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31647+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31648+#else
31649 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31650 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31651 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31652+#endif
31653+
31654 if (!switcher_vma) {
31655 err = -ENOMEM;
31656 printk("lguest: could not map switcher pages high\n");
31657@@ -118,7 +126,7 @@ static __init int map_switcher(void)
31658 * Now the Switcher is mapped at the right address, we can't fail!
31659 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31660 */
31661- memcpy(switcher_vma->addr, start_switcher_text,
31662+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31663 end_switcher_text - start_switcher_text);
31664
31665 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31666diff -urNp linux-2.6.32.46/drivers/lguest/x86/core.c linux-2.6.32.46/drivers/lguest/x86/core.c
31667--- linux-2.6.32.46/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31668+++ linux-2.6.32.46/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31669@@ -59,7 +59,7 @@ static struct {
31670 /* Offset from where switcher.S was compiled to where we've copied it */
31671 static unsigned long switcher_offset(void)
31672 {
31673- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31674+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31675 }
31676
31677 /* This cpu's struct lguest_pages. */
31678@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31679 * These copies are pretty cheap, so we do them unconditionally: */
31680 /* Save the current Host top-level page directory.
31681 */
31682+
31683+#ifdef CONFIG_PAX_PER_CPU_PGD
31684+ pages->state.host_cr3 = read_cr3();
31685+#else
31686 pages->state.host_cr3 = __pa(current->mm->pgd);
31687+#endif
31688+
31689 /*
31690 * Set up the Guest's page tables to see this CPU's pages (and no
31691 * other CPU's pages).
31692@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31693 * compiled-in switcher code and the high-mapped copy we just made.
31694 */
31695 for (i = 0; i < IDT_ENTRIES; i++)
31696- default_idt_entries[i] += switcher_offset();
31697+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31698
31699 /*
31700 * Set up the Switcher's per-cpu areas.
31701@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31702 * it will be undisturbed when we switch. To change %cs and jump we
31703 * need this structure to feed to Intel's "lcall" instruction.
31704 */
31705- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31706+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31707 lguest_entry.segment = LGUEST_CS;
31708
31709 /*
31710diff -urNp linux-2.6.32.46/drivers/lguest/x86/switcher_32.S linux-2.6.32.46/drivers/lguest/x86/switcher_32.S
31711--- linux-2.6.32.46/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31712+++ linux-2.6.32.46/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31713@@ -87,6 +87,7 @@
31714 #include <asm/page.h>
31715 #include <asm/segment.h>
31716 #include <asm/lguest.h>
31717+#include <asm/processor-flags.h>
31718
31719 // We mark the start of the code to copy
31720 // It's placed in .text tho it's never run here
31721@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31722 // Changes type when we load it: damn Intel!
31723 // For after we switch over our page tables
31724 // That entry will be read-only: we'd crash.
31725+
31726+#ifdef CONFIG_PAX_KERNEXEC
31727+ mov %cr0, %edx
31728+ xor $X86_CR0_WP, %edx
31729+ mov %edx, %cr0
31730+#endif
31731+
31732 movl $(GDT_ENTRY_TSS*8), %edx
31733 ltr %dx
31734
31735@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31736 // Let's clear it again for our return.
31737 // The GDT descriptor of the Host
31738 // Points to the table after two "size" bytes
31739- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31740+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31741 // Clear "used" from type field (byte 5, bit 2)
31742- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31743+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31744+
31745+#ifdef CONFIG_PAX_KERNEXEC
31746+ mov %cr0, %eax
31747+ xor $X86_CR0_WP, %eax
31748+ mov %eax, %cr0
31749+#endif
31750
31751 // Once our page table's switched, the Guest is live!
31752 // The Host fades as we run this final step.
31753@@ -295,13 +309,12 @@ deliver_to_host:
31754 // I consulted gcc, and it gave
31755 // These instructions, which I gladly credit:
31756 leal (%edx,%ebx,8), %eax
31757- movzwl (%eax),%edx
31758- movl 4(%eax), %eax
31759- xorw %ax, %ax
31760- orl %eax, %edx
31761+ movl 4(%eax), %edx
31762+ movw (%eax), %dx
31763 // Now the address of the handler's in %edx
31764 // We call it now: its "iret" drops us home.
31765- jmp *%edx
31766+ ljmp $__KERNEL_CS, $1f
31767+1: jmp *%edx
31768
31769 // Every interrupt can come to us here
31770 // But we must truly tell each apart.
31771diff -urNp linux-2.6.32.46/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.46/drivers/macintosh/via-pmu-backlight.c
31772--- linux-2.6.32.46/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31773+++ linux-2.6.32.46/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31774@@ -15,7 +15,7 @@
31775
31776 #define MAX_PMU_LEVEL 0xFF
31777
31778-static struct backlight_ops pmu_backlight_data;
31779+static const struct backlight_ops pmu_backlight_data;
31780 static DEFINE_SPINLOCK(pmu_backlight_lock);
31781 static int sleeping, uses_pmu_bl;
31782 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31783@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31784 return bd->props.brightness;
31785 }
31786
31787-static struct backlight_ops pmu_backlight_data = {
31788+static const struct backlight_ops pmu_backlight_data = {
31789 .get_brightness = pmu_backlight_get_brightness,
31790 .update_status = pmu_backlight_update_status,
31791
31792diff -urNp linux-2.6.32.46/drivers/macintosh/via-pmu.c linux-2.6.32.46/drivers/macintosh/via-pmu.c
31793--- linux-2.6.32.46/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31794+++ linux-2.6.32.46/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31795@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31796 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31797 }
31798
31799-static struct platform_suspend_ops pmu_pm_ops = {
31800+static const struct platform_suspend_ops pmu_pm_ops = {
31801 .enter = powerbook_sleep,
31802 .valid = pmu_sleep_valid,
31803 };
31804diff -urNp linux-2.6.32.46/drivers/md/dm.c linux-2.6.32.46/drivers/md/dm.c
31805--- linux-2.6.32.46/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31806+++ linux-2.6.32.46/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31807@@ -165,9 +165,9 @@ struct mapped_device {
31808 /*
31809 * Event handling.
31810 */
31811- atomic_t event_nr;
31812+ atomic_unchecked_t event_nr;
31813 wait_queue_head_t eventq;
31814- atomic_t uevent_seq;
31815+ atomic_unchecked_t uevent_seq;
31816 struct list_head uevent_list;
31817 spinlock_t uevent_lock; /* Protect access to uevent_list */
31818
31819@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31820 rwlock_init(&md->map_lock);
31821 atomic_set(&md->holders, 1);
31822 atomic_set(&md->open_count, 0);
31823- atomic_set(&md->event_nr, 0);
31824- atomic_set(&md->uevent_seq, 0);
31825+ atomic_set_unchecked(&md->event_nr, 0);
31826+ atomic_set_unchecked(&md->uevent_seq, 0);
31827 INIT_LIST_HEAD(&md->uevent_list);
31828 spin_lock_init(&md->uevent_lock);
31829
31830@@ -1927,7 +1927,7 @@ static void event_callback(void *context
31831
31832 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31833
31834- atomic_inc(&md->event_nr);
31835+ atomic_inc_unchecked(&md->event_nr);
31836 wake_up(&md->eventq);
31837 }
31838
31839@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31840
31841 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31842 {
31843- return atomic_add_return(1, &md->uevent_seq);
31844+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31845 }
31846
31847 uint32_t dm_get_event_nr(struct mapped_device *md)
31848 {
31849- return atomic_read(&md->event_nr);
31850+ return atomic_read_unchecked(&md->event_nr);
31851 }
31852
31853 int dm_wait_event(struct mapped_device *md, int event_nr)
31854 {
31855 return wait_event_interruptible(md->eventq,
31856- (event_nr != atomic_read(&md->event_nr)));
31857+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31858 }
31859
31860 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31861diff -urNp linux-2.6.32.46/drivers/md/dm-ioctl.c linux-2.6.32.46/drivers/md/dm-ioctl.c
31862--- linux-2.6.32.46/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31863+++ linux-2.6.32.46/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31864@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31865 cmd == DM_LIST_VERSIONS_CMD)
31866 return 0;
31867
31868- if ((cmd == DM_DEV_CREATE_CMD)) {
31869+ if (cmd == DM_DEV_CREATE_CMD) {
31870 if (!*param->name) {
31871 DMWARN("name not supplied when creating device");
31872 return -EINVAL;
31873diff -urNp linux-2.6.32.46/drivers/md/dm-raid1.c linux-2.6.32.46/drivers/md/dm-raid1.c
31874--- linux-2.6.32.46/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31875+++ linux-2.6.32.46/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31876@@ -41,7 +41,7 @@ enum dm_raid1_error {
31877
31878 struct mirror {
31879 struct mirror_set *ms;
31880- atomic_t error_count;
31881+ atomic_unchecked_t error_count;
31882 unsigned long error_type;
31883 struct dm_dev *dev;
31884 sector_t offset;
31885@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31886 * simple way to tell if a device has encountered
31887 * errors.
31888 */
31889- atomic_inc(&m->error_count);
31890+ atomic_inc_unchecked(&m->error_count);
31891
31892 if (test_and_set_bit(error_type, &m->error_type))
31893 return;
31894@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31895 }
31896
31897 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31898- if (!atomic_read(&new->error_count)) {
31899+ if (!atomic_read_unchecked(&new->error_count)) {
31900 set_default_mirror(new);
31901 break;
31902 }
31903@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31904 struct mirror *m = get_default_mirror(ms);
31905
31906 do {
31907- if (likely(!atomic_read(&m->error_count)))
31908+ if (likely(!atomic_read_unchecked(&m->error_count)))
31909 return m;
31910
31911 if (m-- == ms->mirror)
31912@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31913 {
31914 struct mirror *default_mirror = get_default_mirror(m->ms);
31915
31916- return !atomic_read(&default_mirror->error_count);
31917+ return !atomic_read_unchecked(&default_mirror->error_count);
31918 }
31919
31920 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31921@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31922 */
31923 if (likely(region_in_sync(ms, region, 1)))
31924 m = choose_mirror(ms, bio->bi_sector);
31925- else if (m && atomic_read(&m->error_count))
31926+ else if (m && atomic_read_unchecked(&m->error_count))
31927 m = NULL;
31928
31929 if (likely(m))
31930@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31931 }
31932
31933 ms->mirror[mirror].ms = ms;
31934- atomic_set(&(ms->mirror[mirror].error_count), 0);
31935+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31936 ms->mirror[mirror].error_type = 0;
31937 ms->mirror[mirror].offset = offset;
31938
31939@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31940 */
31941 static char device_status_char(struct mirror *m)
31942 {
31943- if (!atomic_read(&(m->error_count)))
31944+ if (!atomic_read_unchecked(&(m->error_count)))
31945 return 'A';
31946
31947 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31948diff -urNp linux-2.6.32.46/drivers/md/dm-stripe.c linux-2.6.32.46/drivers/md/dm-stripe.c
31949--- linux-2.6.32.46/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31950+++ linux-2.6.32.46/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31951@@ -20,7 +20,7 @@ struct stripe {
31952 struct dm_dev *dev;
31953 sector_t physical_start;
31954
31955- atomic_t error_count;
31956+ atomic_unchecked_t error_count;
31957 };
31958
31959 struct stripe_c {
31960@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31961 kfree(sc);
31962 return r;
31963 }
31964- atomic_set(&(sc->stripe[i].error_count), 0);
31965+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31966 }
31967
31968 ti->private = sc;
31969@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31970 DMEMIT("%d ", sc->stripes);
31971 for (i = 0; i < sc->stripes; i++) {
31972 DMEMIT("%s ", sc->stripe[i].dev->name);
31973- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31974+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31975 'D' : 'A';
31976 }
31977 buffer[i] = '\0';
31978@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31979 */
31980 for (i = 0; i < sc->stripes; i++)
31981 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31982- atomic_inc(&(sc->stripe[i].error_count));
31983- if (atomic_read(&(sc->stripe[i].error_count)) <
31984+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31985+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31986 DM_IO_ERROR_THRESHOLD)
31987 queue_work(kstriped, &sc->kstriped_ws);
31988 }
31989diff -urNp linux-2.6.32.46/drivers/md/dm-sysfs.c linux-2.6.32.46/drivers/md/dm-sysfs.c
31990--- linux-2.6.32.46/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31991+++ linux-2.6.32.46/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31992@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31993 NULL,
31994 };
31995
31996-static struct sysfs_ops dm_sysfs_ops = {
31997+static const struct sysfs_ops dm_sysfs_ops = {
31998 .show = dm_attr_show,
31999 };
32000
32001diff -urNp linux-2.6.32.46/drivers/md/dm-table.c linux-2.6.32.46/drivers/md/dm-table.c
32002--- linux-2.6.32.46/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
32003+++ linux-2.6.32.46/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
32004@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32005 if (!dev_size)
32006 return 0;
32007
32008- if ((start >= dev_size) || (start + len > dev_size)) {
32009+ if ((start >= dev_size) || (len > dev_size - start)) {
32010 DMWARN("%s: %s too small for target: "
32011 "start=%llu, len=%llu, dev_size=%llu",
32012 dm_device_name(ti->table->md), bdevname(bdev, b),
32013diff -urNp linux-2.6.32.46/drivers/md/md.c linux-2.6.32.46/drivers/md/md.c
32014--- linux-2.6.32.46/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
32015+++ linux-2.6.32.46/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
32016@@ -153,10 +153,10 @@ static int start_readonly;
32017 * start build, activate spare
32018 */
32019 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32020-static atomic_t md_event_count;
32021+static atomic_unchecked_t md_event_count;
32022 void md_new_event(mddev_t *mddev)
32023 {
32024- atomic_inc(&md_event_count);
32025+ atomic_inc_unchecked(&md_event_count);
32026 wake_up(&md_event_waiters);
32027 }
32028 EXPORT_SYMBOL_GPL(md_new_event);
32029@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32030 */
32031 static void md_new_event_inintr(mddev_t *mddev)
32032 {
32033- atomic_inc(&md_event_count);
32034+ atomic_inc_unchecked(&md_event_count);
32035 wake_up(&md_event_waiters);
32036 }
32037
32038@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32039
32040 rdev->preferred_minor = 0xffff;
32041 rdev->data_offset = le64_to_cpu(sb->data_offset);
32042- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32043+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32044
32045 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32046 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32047@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32048 else
32049 sb->resync_offset = cpu_to_le64(0);
32050
32051- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32052+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32053
32054 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32055 sb->size = cpu_to_le64(mddev->dev_sectors);
32056@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32057 static ssize_t
32058 errors_show(mdk_rdev_t *rdev, char *page)
32059 {
32060- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32061+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32062 }
32063
32064 static ssize_t
32065@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32066 char *e;
32067 unsigned long n = simple_strtoul(buf, &e, 10);
32068 if (*buf && (*e == 0 || *e == '\n')) {
32069- atomic_set(&rdev->corrected_errors, n);
32070+ atomic_set_unchecked(&rdev->corrected_errors, n);
32071 return len;
32072 }
32073 return -EINVAL;
32074@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32075 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32076 kfree(rdev);
32077 }
32078-static struct sysfs_ops rdev_sysfs_ops = {
32079+static const struct sysfs_ops rdev_sysfs_ops = {
32080 .show = rdev_attr_show,
32081 .store = rdev_attr_store,
32082 };
32083@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32084 rdev->data_offset = 0;
32085 rdev->sb_events = 0;
32086 atomic_set(&rdev->nr_pending, 0);
32087- atomic_set(&rdev->read_errors, 0);
32088- atomic_set(&rdev->corrected_errors, 0);
32089+ atomic_set_unchecked(&rdev->read_errors, 0);
32090+ atomic_set_unchecked(&rdev->corrected_errors, 0);
32091
32092 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32093 if (!size) {
32094@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32095 kfree(mddev);
32096 }
32097
32098-static struct sysfs_ops md_sysfs_ops = {
32099+static const struct sysfs_ops md_sysfs_ops = {
32100 .show = md_attr_show,
32101 .store = md_attr_store,
32102 };
32103@@ -4474,7 +4474,8 @@ out:
32104 err = 0;
32105 blk_integrity_unregister(disk);
32106 md_new_event(mddev);
32107- sysfs_notify_dirent(mddev->sysfs_state);
32108+ if (mddev->sysfs_state)
32109+ sysfs_notify_dirent(mddev->sysfs_state);
32110 return err;
32111 }
32112
32113@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32114
32115 spin_unlock(&pers_lock);
32116 seq_printf(seq, "\n");
32117- mi->event = atomic_read(&md_event_count);
32118+ mi->event = atomic_read_unchecked(&md_event_count);
32119 return 0;
32120 }
32121 if (v == (void*)2) {
32122@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32123 chunk_kb ? "KB" : "B");
32124 if (bitmap->file) {
32125 seq_printf(seq, ", file: ");
32126- seq_path(seq, &bitmap->file->f_path, " \t\n");
32127+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32128 }
32129
32130 seq_printf(seq, "\n");
32131@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32132 else {
32133 struct seq_file *p = file->private_data;
32134 p->private = mi;
32135- mi->event = atomic_read(&md_event_count);
32136+ mi->event = atomic_read_unchecked(&md_event_count);
32137 }
32138 return error;
32139 }
32140@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32141 /* always allow read */
32142 mask = POLLIN | POLLRDNORM;
32143
32144- if (mi->event != atomic_read(&md_event_count))
32145+ if (mi->event != atomic_read_unchecked(&md_event_count))
32146 mask |= POLLERR | POLLPRI;
32147 return mask;
32148 }
32149@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32150 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32151 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32152 (int)part_stat_read(&disk->part0, sectors[1]) -
32153- atomic_read(&disk->sync_io);
32154+ atomic_read_unchecked(&disk->sync_io);
32155 /* sync IO will cause sync_io to increase before the disk_stats
32156 * as sync_io is counted when a request starts, and
32157 * disk_stats is counted when it completes.
32158diff -urNp linux-2.6.32.46/drivers/md/md.h linux-2.6.32.46/drivers/md/md.h
32159--- linux-2.6.32.46/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32160+++ linux-2.6.32.46/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32161@@ -94,10 +94,10 @@ struct mdk_rdev_s
32162 * only maintained for arrays that
32163 * support hot removal
32164 */
32165- atomic_t read_errors; /* number of consecutive read errors that
32166+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
32167 * we have tried to ignore.
32168 */
32169- atomic_t corrected_errors; /* number of corrected read errors,
32170+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32171 * for reporting to userspace and storing
32172 * in superblock.
32173 */
32174@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32175
32176 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32177 {
32178- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32179+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32180 }
32181
32182 struct mdk_personality
32183diff -urNp linux-2.6.32.46/drivers/md/raid10.c linux-2.6.32.46/drivers/md/raid10.c
32184--- linux-2.6.32.46/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32185+++ linux-2.6.32.46/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32186@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32187 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32188 set_bit(R10BIO_Uptodate, &r10_bio->state);
32189 else {
32190- atomic_add(r10_bio->sectors,
32191+ atomic_add_unchecked(r10_bio->sectors,
32192 &conf->mirrors[d].rdev->corrected_errors);
32193 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32194 md_error(r10_bio->mddev,
32195@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32196 test_bit(In_sync, &rdev->flags)) {
32197 atomic_inc(&rdev->nr_pending);
32198 rcu_read_unlock();
32199- atomic_add(s, &rdev->corrected_errors);
32200+ atomic_add_unchecked(s, &rdev->corrected_errors);
32201 if (sync_page_io(rdev->bdev,
32202 r10_bio->devs[sl].addr +
32203 sect + rdev->data_offset,
32204diff -urNp linux-2.6.32.46/drivers/md/raid1.c linux-2.6.32.46/drivers/md/raid1.c
32205--- linux-2.6.32.46/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32206+++ linux-2.6.32.46/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32207@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32208 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32209 continue;
32210 rdev = conf->mirrors[d].rdev;
32211- atomic_add(s, &rdev->corrected_errors);
32212+ atomic_add_unchecked(s, &rdev->corrected_errors);
32213 if (sync_page_io(rdev->bdev,
32214 sect + rdev->data_offset,
32215 s<<9,
32216@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32217 /* Well, this device is dead */
32218 md_error(mddev, rdev);
32219 else {
32220- atomic_add(s, &rdev->corrected_errors);
32221+ atomic_add_unchecked(s, &rdev->corrected_errors);
32222 printk(KERN_INFO
32223 "raid1:%s: read error corrected "
32224 "(%d sectors at %llu on %s)\n",
32225diff -urNp linux-2.6.32.46/drivers/md/raid5.c linux-2.6.32.46/drivers/md/raid5.c
32226--- linux-2.6.32.46/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32227+++ linux-2.6.32.46/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32228@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32229 bi->bi_next = NULL;
32230 if ((rw & WRITE) &&
32231 test_bit(R5_ReWrite, &sh->dev[i].flags))
32232- atomic_add(STRIPE_SECTORS,
32233+ atomic_add_unchecked(STRIPE_SECTORS,
32234 &rdev->corrected_errors);
32235 generic_make_request(bi);
32236 } else {
32237@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32238 clear_bit(R5_ReadError, &sh->dev[i].flags);
32239 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32240 }
32241- if (atomic_read(&conf->disks[i].rdev->read_errors))
32242- atomic_set(&conf->disks[i].rdev->read_errors, 0);
32243+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32244+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32245 } else {
32246 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32247 int retry = 0;
32248 rdev = conf->disks[i].rdev;
32249
32250 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32251- atomic_inc(&rdev->read_errors);
32252+ atomic_inc_unchecked(&rdev->read_errors);
32253 if (conf->mddev->degraded >= conf->max_degraded)
32254 printk_rl(KERN_WARNING
32255 "raid5:%s: read error not correctable "
32256@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32257 (unsigned long long)(sh->sector
32258 + rdev->data_offset),
32259 bdn);
32260- else if (atomic_read(&rdev->read_errors)
32261+ else if (atomic_read_unchecked(&rdev->read_errors)
32262 > conf->max_nr_stripes)
32263 printk(KERN_WARNING
32264 "raid5:%s: Too many read errors, failing device %s.\n",
32265@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32266 sector_t r_sector;
32267 struct stripe_head sh2;
32268
32269+ pax_track_stack();
32270
32271 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32272 stripe = new_sector;
32273diff -urNp linux-2.6.32.46/drivers/media/common/saa7146_hlp.c linux-2.6.32.46/drivers/media/common/saa7146_hlp.c
32274--- linux-2.6.32.46/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32275+++ linux-2.6.32.46/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32276@@ -353,6 +353,8 @@ static void calculate_clipping_registers
32277
32278 int x[32], y[32], w[32], h[32];
32279
32280+ pax_track_stack();
32281+
32282 /* clear out memory */
32283 memset(&line_list[0], 0x00, sizeof(u32)*32);
32284 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32285diff -urNp linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32286--- linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32287+++ linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32288@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32289 u8 buf[HOST_LINK_BUF_SIZE];
32290 int i;
32291
32292+ pax_track_stack();
32293+
32294 dprintk("%s\n", __func__);
32295
32296 /* check if we have space for a link buf in the rx_buffer */
32297@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32298 unsigned long timeout;
32299 int written;
32300
32301+ pax_track_stack();
32302+
32303 dprintk("%s\n", __func__);
32304
32305 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32306diff -urNp linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_demux.h
32307--- linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32308+++ linux-2.6.32.46/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32309@@ -71,7 +71,7 @@ struct dvb_demux_feed {
32310 union {
32311 dmx_ts_cb ts;
32312 dmx_section_cb sec;
32313- } cb;
32314+ } __no_const cb;
32315
32316 struct dvb_demux *demux;
32317 void *priv;
32318diff -urNp linux-2.6.32.46/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.46/drivers/media/dvb/dvb-core/dvbdev.c
32319--- linux-2.6.32.46/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32320+++ linux-2.6.32.46/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-23 21:22:32.000000000 -0400
32321@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapt
32322 const struct dvb_device *template, void *priv, int type)
32323 {
32324 struct dvb_device *dvbdev;
32325- struct file_operations *dvbdevfops;
32326+ file_operations_no_const *dvbdevfops;
32327 struct device *clsdev;
32328 int minor;
32329 int id;
32330diff -urNp linux-2.6.32.46/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.46/drivers/media/dvb/dvb-usb/cxusb.c
32331--- linux-2.6.32.46/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32332+++ linux-2.6.32.46/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32333@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32334 struct dib0700_adapter_state {
32335 int (*set_param_save) (struct dvb_frontend *,
32336 struct dvb_frontend_parameters *);
32337-};
32338+} __no_const;
32339
32340 static int dib7070_set_param_override(struct dvb_frontend *fe,
32341 struct dvb_frontend_parameters *fep)
32342diff -urNp linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_core.c
32343--- linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32344+++ linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32345@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32346
32347 u8 buf[260];
32348
32349+ pax_track_stack();
32350+
32351 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32352 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32353
32354diff -urNp linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_devices.c
32355--- linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32356+++ linux-2.6.32.46/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32357@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32358
32359 struct dib0700_adapter_state {
32360 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32361-};
32362+} __no_const;
32363
32364 /* Hauppauge Nova-T 500 (aka Bristol)
32365 * has a LNA on GPIO0 which is enabled by setting 1 */
32366diff -urNp linux-2.6.32.46/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.46/drivers/media/dvb/frontends/dib3000.h
32367--- linux-2.6.32.46/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32368+++ linux-2.6.32.46/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32369@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32370 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32371 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32372 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32373-};
32374+} __no_const;
32375
32376 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32377 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32378diff -urNp linux-2.6.32.46/drivers/media/dvb/frontends/or51211.c linux-2.6.32.46/drivers/media/dvb/frontends/or51211.c
32379--- linux-2.6.32.46/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32380+++ linux-2.6.32.46/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32381@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32382 u8 tudata[585];
32383 int i;
32384
32385+ pax_track_stack();
32386+
32387 dprintk("Firmware is %zd bytes\n",fw->size);
32388
32389 /* Get eprom data */
32390diff -urNp linux-2.6.32.46/drivers/media/radio/radio-cadet.c linux-2.6.32.46/drivers/media/radio/radio-cadet.c
32391--- linux-2.6.32.46/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32392+++ linux-2.6.32.46/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32393@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32394 while (i < count && dev->rdsin != dev->rdsout)
32395 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32396
32397- if (copy_to_user(data, readbuf, i))
32398+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32399 return -EFAULT;
32400 return i;
32401 }
32402diff -urNp linux-2.6.32.46/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.46/drivers/media/video/cx18/cx18-driver.c
32403--- linux-2.6.32.46/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32404+++ linux-2.6.32.46/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32405@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32406
32407 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32408
32409-static atomic_t cx18_instance = ATOMIC_INIT(0);
32410+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32411
32412 /* Parameter declarations */
32413 static int cardtype[CX18_MAX_CARDS];
32414@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32415 struct i2c_client c;
32416 u8 eedata[256];
32417
32418+ pax_track_stack();
32419+
32420 memset(&c, 0, sizeof(c));
32421 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32422 c.adapter = &cx->i2c_adap[0];
32423@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32424 struct cx18 *cx;
32425
32426 /* FIXME - module parameter arrays constrain max instances */
32427- i = atomic_inc_return(&cx18_instance) - 1;
32428+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32429 if (i >= CX18_MAX_CARDS) {
32430 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32431 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32432diff -urNp linux-2.6.32.46/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.46/drivers/media/video/ivtv/ivtv-driver.c
32433--- linux-2.6.32.46/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32434+++ linux-2.6.32.46/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32435@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32436 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32437
32438 /* ivtv instance counter */
32439-static atomic_t ivtv_instance = ATOMIC_INIT(0);
32440+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32441
32442 /* Parameter declarations */
32443 static int cardtype[IVTV_MAX_CARDS];
32444diff -urNp linux-2.6.32.46/drivers/media/video/omap24xxcam.c linux-2.6.32.46/drivers/media/video/omap24xxcam.c
32445--- linux-2.6.32.46/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32446+++ linux-2.6.32.46/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32447@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32448 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32449
32450 do_gettimeofday(&vb->ts);
32451- vb->field_count = atomic_add_return(2, &fh->field_count);
32452+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32453 if (csr & csr_error) {
32454 vb->state = VIDEOBUF_ERROR;
32455 if (!atomic_read(&fh->cam->in_reset)) {
32456diff -urNp linux-2.6.32.46/drivers/media/video/omap24xxcam.h linux-2.6.32.46/drivers/media/video/omap24xxcam.h
32457--- linux-2.6.32.46/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32458+++ linux-2.6.32.46/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32459@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32460 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32461 struct videobuf_queue vbq;
32462 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32463- atomic_t field_count; /* field counter for videobuf_buffer */
32464+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32465 /* accessing cam here doesn't need serialisation: it's constant */
32466 struct omap24xxcam_device *cam;
32467 };
32468diff -urNp linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32469--- linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32470+++ linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32471@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32472 u8 *eeprom;
32473 struct tveeprom tvdata;
32474
32475+ pax_track_stack();
32476+
32477 memset(&tvdata,0,sizeof(tvdata));
32478
32479 eeprom = pvr2_eeprom_fetch(hdw);
32480diff -urNp linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
32481--- linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-03-27 14:31:47.000000000 -0400
32482+++ linux-2.6.32.46/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-23 21:22:38.000000000 -0400
32483@@ -195,7 +195,7 @@ struct pvr2_hdw {
32484
32485 /* I2C stuff */
32486 struct i2c_adapter i2c_adap;
32487- struct i2c_algorithm i2c_algo;
32488+ i2c_algorithm_no_const i2c_algo;
32489 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
32490 int i2c_cx25840_hack_state;
32491 int i2c_linked;
32492diff -urNp linux-2.6.32.46/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.46/drivers/media/video/saa7134/saa6752hs.c
32493--- linux-2.6.32.46/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32494+++ linux-2.6.32.46/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32495@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32496 unsigned char localPAT[256];
32497 unsigned char localPMT[256];
32498
32499+ pax_track_stack();
32500+
32501 /* Set video format - must be done first as it resets other settings */
32502 set_reg8(client, 0x41, h->video_format);
32503
32504diff -urNp linux-2.6.32.46/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.46/drivers/media/video/saa7164/saa7164-cmd.c
32505--- linux-2.6.32.46/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32506+++ linux-2.6.32.46/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32507@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32508 wait_queue_head_t *q = 0;
32509 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32510
32511+ pax_track_stack();
32512+
32513 /* While any outstand message on the bus exists... */
32514 do {
32515
32516@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32517 u8 tmp[512];
32518 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32519
32520+ pax_track_stack();
32521+
32522 while (loop) {
32523
32524 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32525diff -urNp linux-2.6.32.46/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.46/drivers/media/video/usbvideo/ibmcam.c
32526--- linux-2.6.32.46/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32527+++ linux-2.6.32.46/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32528@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32529 static int __init ibmcam_init(void)
32530 {
32531 struct usbvideo_cb cbTbl;
32532- memset(&cbTbl, 0, sizeof(cbTbl));
32533- cbTbl.probe = ibmcam_probe;
32534- cbTbl.setupOnOpen = ibmcam_setup_on_open;
32535- cbTbl.videoStart = ibmcam_video_start;
32536- cbTbl.videoStop = ibmcam_video_stop;
32537- cbTbl.processData = ibmcam_ProcessIsocData;
32538- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32539- cbTbl.adjustPicture = ibmcam_adjust_picture;
32540- cbTbl.getFPS = ibmcam_calculate_fps;
32541+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32542+ *(void **)&cbTbl.probe = ibmcam_probe;
32543+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32544+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
32545+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32546+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32547+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32548+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32549+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32550 return usbvideo_register(
32551 &cams,
32552 MAX_IBMCAM,
32553diff -urNp linux-2.6.32.46/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.46/drivers/media/video/usbvideo/konicawc.c
32554--- linux-2.6.32.46/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32555+++ linux-2.6.32.46/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32556@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32557 int error;
32558
32559 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32560- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32561+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32562
32563 cam->input = input_dev = input_allocate_device();
32564 if (!input_dev) {
32565@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32566 struct usbvideo_cb cbTbl;
32567 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32568 DRIVER_DESC "\n");
32569- memset(&cbTbl, 0, sizeof(cbTbl));
32570- cbTbl.probe = konicawc_probe;
32571- cbTbl.setupOnOpen = konicawc_setup_on_open;
32572- cbTbl.processData = konicawc_process_isoc;
32573- cbTbl.getFPS = konicawc_calculate_fps;
32574- cbTbl.setVideoMode = konicawc_set_video_mode;
32575- cbTbl.startDataPump = konicawc_start_data;
32576- cbTbl.stopDataPump = konicawc_stop_data;
32577- cbTbl.adjustPicture = konicawc_adjust_picture;
32578- cbTbl.userFree = konicawc_free_uvd;
32579+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
32580+ *(void **)&cbTbl.probe = konicawc_probe;
32581+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32582+ *(void **)&cbTbl.processData = konicawc_process_isoc;
32583+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32584+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32585+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
32586+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32587+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32588+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
32589 return usbvideo_register(
32590 &cams,
32591 MAX_CAMERAS,
32592diff -urNp linux-2.6.32.46/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.46/drivers/media/video/usbvideo/quickcam_messenger.c
32593--- linux-2.6.32.46/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32594+++ linux-2.6.32.46/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32595@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32596 int error;
32597
32598 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32599- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32600+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32601
32602 cam->input = input_dev = input_allocate_device();
32603 if (!input_dev) {
32604diff -urNp linux-2.6.32.46/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.46/drivers/media/video/usbvideo/ultracam.c
32605--- linux-2.6.32.46/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32606+++ linux-2.6.32.46/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32607@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32608 {
32609 struct usbvideo_cb cbTbl;
32610 memset(&cbTbl, 0, sizeof(cbTbl));
32611- cbTbl.probe = ultracam_probe;
32612- cbTbl.setupOnOpen = ultracam_setup_on_open;
32613- cbTbl.videoStart = ultracam_video_start;
32614- cbTbl.videoStop = ultracam_video_stop;
32615- cbTbl.processData = ultracam_ProcessIsocData;
32616- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32617- cbTbl.adjustPicture = ultracam_adjust_picture;
32618- cbTbl.getFPS = ultracam_calculate_fps;
32619+ *(void **)&cbTbl.probe = ultracam_probe;
32620+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32621+ *(void **)&cbTbl.videoStart = ultracam_video_start;
32622+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
32623+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32624+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32625+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32626+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32627 return usbvideo_register(
32628 &cams,
32629 MAX_CAMERAS,
32630diff -urNp linux-2.6.32.46/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.46/drivers/media/video/usbvideo/usbvideo.c
32631--- linux-2.6.32.46/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32632+++ linux-2.6.32.46/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32633@@ -697,15 +697,15 @@ int usbvideo_register(
32634 __func__, cams, base_size, num_cams);
32635
32636 /* Copy callbacks, apply defaults for those that are not set */
32637- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32638+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32639 if (cams->cb.getFrame == NULL)
32640- cams->cb.getFrame = usbvideo_GetFrame;
32641+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32642 if (cams->cb.disconnect == NULL)
32643- cams->cb.disconnect = usbvideo_Disconnect;
32644+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32645 if (cams->cb.startDataPump == NULL)
32646- cams->cb.startDataPump = usbvideo_StartDataPump;
32647+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32648 if (cams->cb.stopDataPump == NULL)
32649- cams->cb.stopDataPump = usbvideo_StopDataPump;
32650+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32651
32652 cams->num_cameras = num_cams;
32653 cams->cam = (struct uvd *) &cams[1];
32654diff -urNp linux-2.6.32.46/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.46/drivers/media/video/usbvision/usbvision-core.c
32655--- linux-2.6.32.46/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32656+++ linux-2.6.32.46/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32657@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32658 unsigned char rv, gv, bv;
32659 static unsigned char *Y, *U, *V;
32660
32661+ pax_track_stack();
32662+
32663 frame = usbvision->curFrame;
32664 imageSize = frame->frmwidth * frame->frmheight;
32665 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32666diff -urNp linux-2.6.32.46/drivers/media/video/v4l2-device.c linux-2.6.32.46/drivers/media/video/v4l2-device.c
32667--- linux-2.6.32.46/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32668+++ linux-2.6.32.46/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32669@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32670 EXPORT_SYMBOL_GPL(v4l2_device_register);
32671
32672 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32673- atomic_t *instance)
32674+ atomic_unchecked_t *instance)
32675 {
32676- int num = atomic_inc_return(instance) - 1;
32677+ int num = atomic_inc_return_unchecked(instance) - 1;
32678 int len = strlen(basename);
32679
32680 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32681diff -urNp linux-2.6.32.46/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.46/drivers/media/video/videobuf-dma-sg.c
32682--- linux-2.6.32.46/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32683+++ linux-2.6.32.46/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32684@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32685 {
32686 struct videobuf_queue q;
32687
32688+ pax_track_stack();
32689+
32690 /* Required to make generic handler to call __videobuf_alloc */
32691 q.int_ops = &sg_ops;
32692
32693diff -urNp linux-2.6.32.46/drivers/message/fusion/mptbase.c linux-2.6.32.46/drivers/message/fusion/mptbase.c
32694--- linux-2.6.32.46/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32695+++ linux-2.6.32.46/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32696@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32697 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32698 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32699
32700+#ifdef CONFIG_GRKERNSEC_HIDESYM
32701+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32702+ NULL, NULL);
32703+#else
32704 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32705 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32706+#endif
32707+
32708 /*
32709 * Rounding UP to nearest 4-kB boundary here...
32710 */
32711diff -urNp linux-2.6.32.46/drivers/message/fusion/mptsas.c linux-2.6.32.46/drivers/message/fusion/mptsas.c
32712--- linux-2.6.32.46/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32713+++ linux-2.6.32.46/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32714@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32715 return 0;
32716 }
32717
32718+static inline void
32719+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32720+{
32721+ if (phy_info->port_details) {
32722+ phy_info->port_details->rphy = rphy;
32723+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32724+ ioc->name, rphy));
32725+ }
32726+
32727+ if (rphy) {
32728+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32729+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32730+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32731+ ioc->name, rphy, rphy->dev.release));
32732+ }
32733+}
32734+
32735 /* no mutex */
32736 static void
32737 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32738@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32739 return NULL;
32740 }
32741
32742-static inline void
32743-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32744-{
32745- if (phy_info->port_details) {
32746- phy_info->port_details->rphy = rphy;
32747- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32748- ioc->name, rphy));
32749- }
32750-
32751- if (rphy) {
32752- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32753- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32754- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32755- ioc->name, rphy, rphy->dev.release));
32756- }
32757-}
32758-
32759 static inline struct sas_port *
32760 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32761 {
32762diff -urNp linux-2.6.32.46/drivers/message/fusion/mptscsih.c linux-2.6.32.46/drivers/message/fusion/mptscsih.c
32763--- linux-2.6.32.46/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32764+++ linux-2.6.32.46/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32765@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32766
32767 h = shost_priv(SChost);
32768
32769- if (h) {
32770- if (h->info_kbuf == NULL)
32771- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32772- return h->info_kbuf;
32773- h->info_kbuf[0] = '\0';
32774+ if (!h)
32775+ return NULL;
32776
32777- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32778- h->info_kbuf[size-1] = '\0';
32779- }
32780+ if (h->info_kbuf == NULL)
32781+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32782+ return h->info_kbuf;
32783+ h->info_kbuf[0] = '\0';
32784+
32785+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32786+ h->info_kbuf[size-1] = '\0';
32787
32788 return h->info_kbuf;
32789 }
32790diff -urNp linux-2.6.32.46/drivers/message/i2o/i2o_config.c linux-2.6.32.46/drivers/message/i2o/i2o_config.c
32791--- linux-2.6.32.46/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32792+++ linux-2.6.32.46/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32793@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32794 struct i2o_message *msg;
32795 unsigned int iop;
32796
32797+ pax_track_stack();
32798+
32799 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32800 return -EFAULT;
32801
32802diff -urNp linux-2.6.32.46/drivers/message/i2o/i2o_proc.c linux-2.6.32.46/drivers/message/i2o/i2o_proc.c
32803--- linux-2.6.32.46/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32804+++ linux-2.6.32.46/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32805@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32806 "Array Controller Device"
32807 };
32808
32809-static char *chtostr(u8 * chars, int n)
32810-{
32811- char tmp[256];
32812- tmp[0] = 0;
32813- return strncat(tmp, (char *)chars, n);
32814-}
32815-
32816 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32817 char *group)
32818 {
32819@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32820
32821 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32822 seq_printf(seq, "%-#8x", ddm_table.module_id);
32823- seq_printf(seq, "%-29s",
32824- chtostr(ddm_table.module_name_version, 28));
32825+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32826 seq_printf(seq, "%9d ", ddm_table.data_size);
32827 seq_printf(seq, "%8d", ddm_table.code_size);
32828
32829@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32830
32831 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32832 seq_printf(seq, "%-#8x", dst->module_id);
32833- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32834- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32835+ seq_printf(seq, "%-.28s", dst->module_name_version);
32836+ seq_printf(seq, "%-.8s", dst->date);
32837 seq_printf(seq, "%8d ", dst->module_size);
32838 seq_printf(seq, "%8d ", dst->mpb_size);
32839 seq_printf(seq, "0x%04x", dst->module_flags);
32840@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32841 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32842 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32843 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32844- seq_printf(seq, "Vendor info : %s\n",
32845- chtostr((u8 *) (work32 + 2), 16));
32846- seq_printf(seq, "Product info : %s\n",
32847- chtostr((u8 *) (work32 + 6), 16));
32848- seq_printf(seq, "Description : %s\n",
32849- chtostr((u8 *) (work32 + 10), 16));
32850- seq_printf(seq, "Product rev. : %s\n",
32851- chtostr((u8 *) (work32 + 14), 8));
32852+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32853+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32854+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32855+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32856
32857 seq_printf(seq, "Serial number : ");
32858 print_serial_number(seq, (u8 *) (work32 + 16),
32859@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32860 }
32861
32862 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32863- seq_printf(seq, "Module name : %s\n",
32864- chtostr(result.module_name, 24));
32865- seq_printf(seq, "Module revision : %s\n",
32866- chtostr(result.module_rev, 8));
32867+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32868+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32869
32870 seq_printf(seq, "Serial number : ");
32871 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32872@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32873 return 0;
32874 }
32875
32876- seq_printf(seq, "Device name : %s\n",
32877- chtostr(result.device_name, 64));
32878- seq_printf(seq, "Service name : %s\n",
32879- chtostr(result.service_name, 64));
32880- seq_printf(seq, "Physical name : %s\n",
32881- chtostr(result.physical_location, 64));
32882- seq_printf(seq, "Instance number : %s\n",
32883- chtostr(result.instance_number, 4));
32884+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32885+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32886+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32887+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32888
32889 return 0;
32890 }
32891diff -urNp linux-2.6.32.46/drivers/message/i2o/iop.c linux-2.6.32.46/drivers/message/i2o/iop.c
32892--- linux-2.6.32.46/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32893+++ linux-2.6.32.46/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32894@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32895
32896 spin_lock_irqsave(&c->context_list_lock, flags);
32897
32898- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32899- atomic_inc(&c->context_list_counter);
32900+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32901+ atomic_inc_unchecked(&c->context_list_counter);
32902
32903- entry->context = atomic_read(&c->context_list_counter);
32904+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32905
32906 list_add(&entry->list, &c->context_list);
32907
32908@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32909
32910 #if BITS_PER_LONG == 64
32911 spin_lock_init(&c->context_list_lock);
32912- atomic_set(&c->context_list_counter, 0);
32913+ atomic_set_unchecked(&c->context_list_counter, 0);
32914 INIT_LIST_HEAD(&c->context_list);
32915 #endif
32916
32917diff -urNp linux-2.6.32.46/drivers/mfd/wm8350-i2c.c linux-2.6.32.46/drivers/mfd/wm8350-i2c.c
32918--- linux-2.6.32.46/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32919+++ linux-2.6.32.46/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32920@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32921 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32922 int ret;
32923
32924+ pax_track_stack();
32925+
32926 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32927 return -EINVAL;
32928
32929diff -urNp linux-2.6.32.46/drivers/misc/kgdbts.c linux-2.6.32.46/drivers/misc/kgdbts.c
32930--- linux-2.6.32.46/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32931+++ linux-2.6.32.46/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32932@@ -118,7 +118,7 @@
32933 } while (0)
32934 #define MAX_CONFIG_LEN 40
32935
32936-static struct kgdb_io kgdbts_io_ops;
32937+static const struct kgdb_io kgdbts_io_ops;
32938 static char get_buf[BUFMAX];
32939 static int get_buf_cnt;
32940 static char put_buf[BUFMAX];
32941@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32942 module_put(THIS_MODULE);
32943 }
32944
32945-static struct kgdb_io kgdbts_io_ops = {
32946+static const struct kgdb_io kgdbts_io_ops = {
32947 .name = "kgdbts",
32948 .read_char = kgdbts_get_char,
32949 .write_char = kgdbts_put_char,
32950diff -urNp linux-2.6.32.46/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.46/drivers/misc/sgi-gru/gruhandles.c
32951--- linux-2.6.32.46/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32952+++ linux-2.6.32.46/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32953@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32954
32955 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32956 {
32957- atomic_long_inc(&mcs_op_statistics[op].count);
32958- atomic_long_add(clks, &mcs_op_statistics[op].total);
32959+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32960+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32961 if (mcs_op_statistics[op].max < clks)
32962 mcs_op_statistics[op].max = clks;
32963 }
32964diff -urNp linux-2.6.32.46/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.46/drivers/misc/sgi-gru/gruprocfs.c
32965--- linux-2.6.32.46/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32966+++ linux-2.6.32.46/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32967@@ -32,9 +32,9 @@
32968
32969 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32970
32971-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32972+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32973 {
32974- unsigned long val = atomic_long_read(v);
32975+ unsigned long val = atomic_long_read_unchecked(v);
32976
32977 if (val)
32978 seq_printf(s, "%16lu %s\n", val, id);
32979@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32980 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32981
32982 for (op = 0; op < mcsop_last; op++) {
32983- count = atomic_long_read(&mcs_op_statistics[op].count);
32984- total = atomic_long_read(&mcs_op_statistics[op].total);
32985+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32986+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32987 max = mcs_op_statistics[op].max;
32988 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32989 count ? total / count : 0, max);
32990diff -urNp linux-2.6.32.46/drivers/misc/sgi-gru/grutables.h linux-2.6.32.46/drivers/misc/sgi-gru/grutables.h
32991--- linux-2.6.32.46/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32992+++ linux-2.6.32.46/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32993@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32994 * GRU statistics.
32995 */
32996 struct gru_stats_s {
32997- atomic_long_t vdata_alloc;
32998- atomic_long_t vdata_free;
32999- atomic_long_t gts_alloc;
33000- atomic_long_t gts_free;
33001- atomic_long_t vdata_double_alloc;
33002- atomic_long_t gts_double_allocate;
33003- atomic_long_t assign_context;
33004- atomic_long_t assign_context_failed;
33005- atomic_long_t free_context;
33006- atomic_long_t load_user_context;
33007- atomic_long_t load_kernel_context;
33008- atomic_long_t lock_kernel_context;
33009- atomic_long_t unlock_kernel_context;
33010- atomic_long_t steal_user_context;
33011- atomic_long_t steal_kernel_context;
33012- atomic_long_t steal_context_failed;
33013- atomic_long_t nopfn;
33014- atomic_long_t break_cow;
33015- atomic_long_t asid_new;
33016- atomic_long_t asid_next;
33017- atomic_long_t asid_wrap;
33018- atomic_long_t asid_reuse;
33019- atomic_long_t intr;
33020- atomic_long_t intr_mm_lock_failed;
33021- atomic_long_t call_os;
33022- atomic_long_t call_os_offnode_reference;
33023- atomic_long_t call_os_check_for_bug;
33024- atomic_long_t call_os_wait_queue;
33025- atomic_long_t user_flush_tlb;
33026- atomic_long_t user_unload_context;
33027- atomic_long_t user_exception;
33028- atomic_long_t set_context_option;
33029- atomic_long_t migrate_check;
33030- atomic_long_t migrated_retarget;
33031- atomic_long_t migrated_unload;
33032- atomic_long_t migrated_unload_delay;
33033- atomic_long_t migrated_nopfn_retarget;
33034- atomic_long_t migrated_nopfn_unload;
33035- atomic_long_t tlb_dropin;
33036- atomic_long_t tlb_dropin_fail_no_asid;
33037- atomic_long_t tlb_dropin_fail_upm;
33038- atomic_long_t tlb_dropin_fail_invalid;
33039- atomic_long_t tlb_dropin_fail_range_active;
33040- atomic_long_t tlb_dropin_fail_idle;
33041- atomic_long_t tlb_dropin_fail_fmm;
33042- atomic_long_t tlb_dropin_fail_no_exception;
33043- atomic_long_t tlb_dropin_fail_no_exception_war;
33044- atomic_long_t tfh_stale_on_fault;
33045- atomic_long_t mmu_invalidate_range;
33046- atomic_long_t mmu_invalidate_page;
33047- atomic_long_t mmu_clear_flush_young;
33048- atomic_long_t flush_tlb;
33049- atomic_long_t flush_tlb_gru;
33050- atomic_long_t flush_tlb_gru_tgh;
33051- atomic_long_t flush_tlb_gru_zero_asid;
33052-
33053- atomic_long_t copy_gpa;
33054-
33055- atomic_long_t mesq_receive;
33056- atomic_long_t mesq_receive_none;
33057- atomic_long_t mesq_send;
33058- atomic_long_t mesq_send_failed;
33059- atomic_long_t mesq_noop;
33060- atomic_long_t mesq_send_unexpected_error;
33061- atomic_long_t mesq_send_lb_overflow;
33062- atomic_long_t mesq_send_qlimit_reached;
33063- atomic_long_t mesq_send_amo_nacked;
33064- atomic_long_t mesq_send_put_nacked;
33065- atomic_long_t mesq_qf_not_full;
33066- atomic_long_t mesq_qf_locked;
33067- atomic_long_t mesq_qf_noop_not_full;
33068- atomic_long_t mesq_qf_switch_head_failed;
33069- atomic_long_t mesq_qf_unexpected_error;
33070- atomic_long_t mesq_noop_unexpected_error;
33071- atomic_long_t mesq_noop_lb_overflow;
33072- atomic_long_t mesq_noop_qlimit_reached;
33073- atomic_long_t mesq_noop_amo_nacked;
33074- atomic_long_t mesq_noop_put_nacked;
33075+ atomic_long_unchecked_t vdata_alloc;
33076+ atomic_long_unchecked_t vdata_free;
33077+ atomic_long_unchecked_t gts_alloc;
33078+ atomic_long_unchecked_t gts_free;
33079+ atomic_long_unchecked_t vdata_double_alloc;
33080+ atomic_long_unchecked_t gts_double_allocate;
33081+ atomic_long_unchecked_t assign_context;
33082+ atomic_long_unchecked_t assign_context_failed;
33083+ atomic_long_unchecked_t free_context;
33084+ atomic_long_unchecked_t load_user_context;
33085+ atomic_long_unchecked_t load_kernel_context;
33086+ atomic_long_unchecked_t lock_kernel_context;
33087+ atomic_long_unchecked_t unlock_kernel_context;
33088+ atomic_long_unchecked_t steal_user_context;
33089+ atomic_long_unchecked_t steal_kernel_context;
33090+ atomic_long_unchecked_t steal_context_failed;
33091+ atomic_long_unchecked_t nopfn;
33092+ atomic_long_unchecked_t break_cow;
33093+ atomic_long_unchecked_t asid_new;
33094+ atomic_long_unchecked_t asid_next;
33095+ atomic_long_unchecked_t asid_wrap;
33096+ atomic_long_unchecked_t asid_reuse;
33097+ atomic_long_unchecked_t intr;
33098+ atomic_long_unchecked_t intr_mm_lock_failed;
33099+ atomic_long_unchecked_t call_os;
33100+ atomic_long_unchecked_t call_os_offnode_reference;
33101+ atomic_long_unchecked_t call_os_check_for_bug;
33102+ atomic_long_unchecked_t call_os_wait_queue;
33103+ atomic_long_unchecked_t user_flush_tlb;
33104+ atomic_long_unchecked_t user_unload_context;
33105+ atomic_long_unchecked_t user_exception;
33106+ atomic_long_unchecked_t set_context_option;
33107+ atomic_long_unchecked_t migrate_check;
33108+ atomic_long_unchecked_t migrated_retarget;
33109+ atomic_long_unchecked_t migrated_unload;
33110+ atomic_long_unchecked_t migrated_unload_delay;
33111+ atomic_long_unchecked_t migrated_nopfn_retarget;
33112+ atomic_long_unchecked_t migrated_nopfn_unload;
33113+ atomic_long_unchecked_t tlb_dropin;
33114+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33115+ atomic_long_unchecked_t tlb_dropin_fail_upm;
33116+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
33117+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
33118+ atomic_long_unchecked_t tlb_dropin_fail_idle;
33119+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
33120+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33121+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33122+ atomic_long_unchecked_t tfh_stale_on_fault;
33123+ atomic_long_unchecked_t mmu_invalidate_range;
33124+ atomic_long_unchecked_t mmu_invalidate_page;
33125+ atomic_long_unchecked_t mmu_clear_flush_young;
33126+ atomic_long_unchecked_t flush_tlb;
33127+ atomic_long_unchecked_t flush_tlb_gru;
33128+ atomic_long_unchecked_t flush_tlb_gru_tgh;
33129+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33130+
33131+ atomic_long_unchecked_t copy_gpa;
33132+
33133+ atomic_long_unchecked_t mesq_receive;
33134+ atomic_long_unchecked_t mesq_receive_none;
33135+ atomic_long_unchecked_t mesq_send;
33136+ atomic_long_unchecked_t mesq_send_failed;
33137+ atomic_long_unchecked_t mesq_noop;
33138+ atomic_long_unchecked_t mesq_send_unexpected_error;
33139+ atomic_long_unchecked_t mesq_send_lb_overflow;
33140+ atomic_long_unchecked_t mesq_send_qlimit_reached;
33141+ atomic_long_unchecked_t mesq_send_amo_nacked;
33142+ atomic_long_unchecked_t mesq_send_put_nacked;
33143+ atomic_long_unchecked_t mesq_qf_not_full;
33144+ atomic_long_unchecked_t mesq_qf_locked;
33145+ atomic_long_unchecked_t mesq_qf_noop_not_full;
33146+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33147+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33148+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33149+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33150+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33151+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33152+ atomic_long_unchecked_t mesq_noop_put_nacked;
33153
33154 };
33155
33156@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33157 cchop_deallocate, tghop_invalidate, mcsop_last};
33158
33159 struct mcs_op_statistic {
33160- atomic_long_t count;
33161- atomic_long_t total;
33162+ atomic_long_unchecked_t count;
33163+ atomic_long_unchecked_t total;
33164 unsigned long max;
33165 };
33166
33167@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33168
33169 #define STAT(id) do { \
33170 if (gru_options & OPT_STATS) \
33171- atomic_long_inc(&gru_stats.id); \
33172+ atomic_long_inc_unchecked(&gru_stats.id); \
33173 } while (0)
33174
33175 #ifdef CONFIG_SGI_GRU_DEBUG
33176diff -urNp linux-2.6.32.46/drivers/misc/sgi-xp/xpc.h linux-2.6.32.46/drivers/misc/sgi-xp/xpc.h
33177--- linux-2.6.32.46/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33178+++ linux-2.6.32.46/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33179@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33180 /* found in xpc_main.c */
33181 extern struct device *xpc_part;
33182 extern struct device *xpc_chan;
33183-extern struct xpc_arch_operations xpc_arch_ops;
33184+extern const struct xpc_arch_operations xpc_arch_ops;
33185 extern int xpc_disengage_timelimit;
33186 extern int xpc_disengage_timedout;
33187 extern int xpc_activate_IRQ_rcvd;
33188diff -urNp linux-2.6.32.46/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.46/drivers/misc/sgi-xp/xpc_main.c
33189--- linux-2.6.32.46/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33190+++ linux-2.6.32.46/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33191@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33192 .notifier_call = xpc_system_die,
33193 };
33194
33195-struct xpc_arch_operations xpc_arch_ops;
33196+const struct xpc_arch_operations xpc_arch_ops;
33197
33198 /*
33199 * Timer function to enforce the timelimit on the partition disengage.
33200diff -urNp linux-2.6.32.46/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.46/drivers/misc/sgi-xp/xpc_sn2.c
33201--- linux-2.6.32.46/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33202+++ linux-2.6.32.46/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33203@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33204 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33205 }
33206
33207-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33208+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33209 .setup_partitions = xpc_setup_partitions_sn2,
33210 .teardown_partitions = xpc_teardown_partitions_sn2,
33211 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33212@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33213 int ret;
33214 size_t buf_size;
33215
33216- xpc_arch_ops = xpc_arch_ops_sn2;
33217+ pax_open_kernel();
33218+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33219+ pax_close_kernel();
33220
33221 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33222 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33223diff -urNp linux-2.6.32.46/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.46/drivers/misc/sgi-xp/xpc_uv.c
33224--- linux-2.6.32.46/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33225+++ linux-2.6.32.46/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33226@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33227 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33228 }
33229
33230-static struct xpc_arch_operations xpc_arch_ops_uv = {
33231+static const struct xpc_arch_operations xpc_arch_ops_uv = {
33232 .setup_partitions = xpc_setup_partitions_uv,
33233 .teardown_partitions = xpc_teardown_partitions_uv,
33234 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33235@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33236 int
33237 xpc_init_uv(void)
33238 {
33239- xpc_arch_ops = xpc_arch_ops_uv;
33240+ pax_open_kernel();
33241+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33242+ pax_close_kernel();
33243
33244 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33245 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33246diff -urNp linux-2.6.32.46/drivers/misc/sgi-xp/xp.h linux-2.6.32.46/drivers/misc/sgi-xp/xp.h
33247--- linux-2.6.32.46/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33248+++ linux-2.6.32.46/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33249@@ -289,7 +289,7 @@ struct xpc_interface {
33250 xpc_notify_func, void *);
33251 void (*received) (short, int, void *);
33252 enum xp_retval (*partid_to_nasids) (short, void *);
33253-};
33254+} __no_const;
33255
33256 extern struct xpc_interface xpc_interface;
33257
33258diff -urNp linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0001.c
33259--- linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33260+++ linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33261@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33262 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33263 unsigned long timeo = jiffies + HZ;
33264
33265+ pax_track_stack();
33266+
33267 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33268 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33269 goto sleep;
33270@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33271 unsigned long initial_adr;
33272 int initial_len = len;
33273
33274+ pax_track_stack();
33275+
33276 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33277 adr += chip->start;
33278 initial_adr = adr;
33279@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33280 int retries = 3;
33281 int ret;
33282
33283+ pax_track_stack();
33284+
33285 adr += chip->start;
33286
33287 retry:
33288diff -urNp linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0020.c
33289--- linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33290+++ linux-2.6.32.46/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33291@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33292 unsigned long cmd_addr;
33293 struct cfi_private *cfi = map->fldrv_priv;
33294
33295+ pax_track_stack();
33296+
33297 adr += chip->start;
33298
33299 /* Ensure cmd read/writes are aligned. */
33300@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33301 DECLARE_WAITQUEUE(wait, current);
33302 int wbufsize, z;
33303
33304+ pax_track_stack();
33305+
33306 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33307 if (adr & (map_bankwidth(map)-1))
33308 return -EINVAL;
33309@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33310 DECLARE_WAITQUEUE(wait, current);
33311 int ret = 0;
33312
33313+ pax_track_stack();
33314+
33315 adr += chip->start;
33316
33317 /* Let's determine this according to the interleave only once */
33318@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33319 unsigned long timeo = jiffies + HZ;
33320 DECLARE_WAITQUEUE(wait, current);
33321
33322+ pax_track_stack();
33323+
33324 adr += chip->start;
33325
33326 /* Let's determine this according to the interleave only once */
33327@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33328 unsigned long timeo = jiffies + HZ;
33329 DECLARE_WAITQUEUE(wait, current);
33330
33331+ pax_track_stack();
33332+
33333 adr += chip->start;
33334
33335 /* Let's determine this according to the interleave only once */
33336diff -urNp linux-2.6.32.46/drivers/mtd/devices/doc2000.c linux-2.6.32.46/drivers/mtd/devices/doc2000.c
33337--- linux-2.6.32.46/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33338+++ linux-2.6.32.46/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33339@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33340
33341 /* The ECC will not be calculated correctly if less than 512 is written */
33342 /* DBB-
33343- if (len != 0x200 && eccbuf)
33344+ if (len != 0x200)
33345 printk(KERN_WARNING
33346 "ECC needs a full sector write (adr: %lx size %lx)\n",
33347 (long) to, (long) len);
33348diff -urNp linux-2.6.32.46/drivers/mtd/devices/doc2001.c linux-2.6.32.46/drivers/mtd/devices/doc2001.c
33349--- linux-2.6.32.46/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33350+++ linux-2.6.32.46/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33351@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33352 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33353
33354 /* Don't allow read past end of device */
33355- if (from >= this->totlen)
33356+ if (from >= this->totlen || !len)
33357 return -EINVAL;
33358
33359 /* Don't allow a single read to cross a 512-byte block boundary */
33360diff -urNp linux-2.6.32.46/drivers/mtd/ftl.c linux-2.6.32.46/drivers/mtd/ftl.c
33361--- linux-2.6.32.46/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33362+++ linux-2.6.32.46/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33363@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33364 loff_t offset;
33365 uint16_t srcunitswap = cpu_to_le16(srcunit);
33366
33367+ pax_track_stack();
33368+
33369 eun = &part->EUNInfo[srcunit];
33370 xfer = &part->XferInfo[xferunit];
33371 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33372diff -urNp linux-2.6.32.46/drivers/mtd/inftlcore.c linux-2.6.32.46/drivers/mtd/inftlcore.c
33373--- linux-2.6.32.46/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33374+++ linux-2.6.32.46/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33375@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33376 struct inftl_oob oob;
33377 size_t retlen;
33378
33379+ pax_track_stack();
33380+
33381 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33382 "pending=%d)\n", inftl, thisVUC, pendingblock);
33383
33384diff -urNp linux-2.6.32.46/drivers/mtd/inftlmount.c linux-2.6.32.46/drivers/mtd/inftlmount.c
33385--- linux-2.6.32.46/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33386+++ linux-2.6.32.46/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33387@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33388 struct INFTLPartition *ip;
33389 size_t retlen;
33390
33391+ pax_track_stack();
33392+
33393 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33394
33395 /*
33396diff -urNp linux-2.6.32.46/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.46/drivers/mtd/lpddr/qinfo_probe.c
33397--- linux-2.6.32.46/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33398+++ linux-2.6.32.46/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33399@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33400 {
33401 map_word pfow_val[4];
33402
33403+ pax_track_stack();
33404+
33405 /* Check identification string */
33406 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33407 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33408diff -urNp linux-2.6.32.46/drivers/mtd/mtdchar.c linux-2.6.32.46/drivers/mtd/mtdchar.c
33409--- linux-2.6.32.46/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33410+++ linux-2.6.32.46/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33411@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33412 u_long size;
33413 struct mtd_info_user info;
33414
33415+ pax_track_stack();
33416+
33417 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33418
33419 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33420diff -urNp linux-2.6.32.46/drivers/mtd/nftlcore.c linux-2.6.32.46/drivers/mtd/nftlcore.c
33421--- linux-2.6.32.46/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33422+++ linux-2.6.32.46/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33423@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33424 int inplace = 1;
33425 size_t retlen;
33426
33427+ pax_track_stack();
33428+
33429 memset(BlockMap, 0xff, sizeof(BlockMap));
33430 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33431
33432diff -urNp linux-2.6.32.46/drivers/mtd/nftlmount.c linux-2.6.32.46/drivers/mtd/nftlmount.c
33433--- linux-2.6.32.46/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33434+++ linux-2.6.32.46/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33435@@ -23,6 +23,7 @@
33436 #include <asm/errno.h>
33437 #include <linux/delay.h>
33438 #include <linux/slab.h>
33439+#include <linux/sched.h>
33440 #include <linux/mtd/mtd.h>
33441 #include <linux/mtd/nand.h>
33442 #include <linux/mtd/nftl.h>
33443@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33444 struct mtd_info *mtd = nftl->mbd.mtd;
33445 unsigned int i;
33446
33447+ pax_track_stack();
33448+
33449 /* Assume logical EraseSize == physical erasesize for starting the scan.
33450 We'll sort it out later if we find a MediaHeader which says otherwise */
33451 /* Actually, we won't. The new DiskOnChip driver has already scanned
33452diff -urNp linux-2.6.32.46/drivers/mtd/ubi/build.c linux-2.6.32.46/drivers/mtd/ubi/build.c
33453--- linux-2.6.32.46/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33454+++ linux-2.6.32.46/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33455@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33456 static int __init bytes_str_to_int(const char *str)
33457 {
33458 char *endp;
33459- unsigned long result;
33460+ unsigned long result, scale = 1;
33461
33462 result = simple_strtoul(str, &endp, 0);
33463 if (str == endp || result >= INT_MAX) {
33464@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33465
33466 switch (*endp) {
33467 case 'G':
33468- result *= 1024;
33469+ scale *= 1024;
33470 case 'M':
33471- result *= 1024;
33472+ scale *= 1024;
33473 case 'K':
33474- result *= 1024;
33475+ scale *= 1024;
33476 if (endp[1] == 'i' && endp[2] == 'B')
33477 endp += 2;
33478 case '\0':
33479@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33480 return -EINVAL;
33481 }
33482
33483- return result;
33484+ if ((intoverflow_t)result*scale >= INT_MAX) {
33485+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33486+ str);
33487+ return -EINVAL;
33488+ }
33489+
33490+ return result*scale;
33491 }
33492
33493 /**
33494diff -urNp linux-2.6.32.46/drivers/net/bnx2.c linux-2.6.32.46/drivers/net/bnx2.c
33495--- linux-2.6.32.46/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33496+++ linux-2.6.32.46/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33497@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33498 int rc = 0;
33499 u32 magic, csum;
33500
33501+ pax_track_stack();
33502+
33503 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33504 goto test_nvram_done;
33505
33506diff -urNp linux-2.6.32.46/drivers/net/cxgb3/l2t.h linux-2.6.32.46/drivers/net/cxgb3/l2t.h
33507--- linux-2.6.32.46/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33508+++ linux-2.6.32.46/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33509@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33510 */
33511 struct l2t_skb_cb {
33512 arp_failure_handler_func arp_failure_handler;
33513-};
33514+} __no_const;
33515
33516 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33517
33518diff -urNp linux-2.6.32.46/drivers/net/cxgb3/t3_hw.c linux-2.6.32.46/drivers/net/cxgb3/t3_hw.c
33519--- linux-2.6.32.46/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33520+++ linux-2.6.32.46/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33521@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33522 int i, addr, ret;
33523 struct t3_vpd vpd;
33524
33525+ pax_track_stack();
33526+
33527 /*
33528 * Card information is normally at VPD_BASE but some early cards had
33529 * it at 0.
33530diff -urNp linux-2.6.32.46/drivers/net/e1000e/82571.c linux-2.6.32.46/drivers/net/e1000e/82571.c
33531--- linux-2.6.32.46/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33532+++ linux-2.6.32.46/drivers/net/e1000e/82571.c 2011-08-23 21:22:32.000000000 -0400
33533@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(s
33534 {
33535 struct e1000_hw *hw = &adapter->hw;
33536 struct e1000_mac_info *mac = &hw->mac;
33537- struct e1000_mac_operations *func = &mac->ops;
33538+ e1000_mac_operations_no_const *func = &mac->ops;
33539 u32 swsm = 0;
33540 u32 swsm2 = 0;
33541 bool force_clear_smbi = false;
33542@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33543 temp = er32(ICRXDMTC);
33544 }
33545
33546-static struct e1000_mac_operations e82571_mac_ops = {
33547+static const struct e1000_mac_operations e82571_mac_ops = {
33548 /* .check_mng_mode: mac type dependent */
33549 /* .check_for_link: media type dependent */
33550 .id_led_init = e1000e_id_led_init,
33551@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33552 .setup_led = e1000e_setup_led_generic,
33553 };
33554
33555-static struct e1000_phy_operations e82_phy_ops_igp = {
33556+static const struct e1000_phy_operations e82_phy_ops_igp = {
33557 .acquire_phy = e1000_get_hw_semaphore_82571,
33558 .check_reset_block = e1000e_check_reset_block_generic,
33559 .commit_phy = NULL,
33560@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33561 .cfg_on_link_up = NULL,
33562 };
33563
33564-static struct e1000_phy_operations e82_phy_ops_m88 = {
33565+static const struct e1000_phy_operations e82_phy_ops_m88 = {
33566 .acquire_phy = e1000_get_hw_semaphore_82571,
33567 .check_reset_block = e1000e_check_reset_block_generic,
33568 .commit_phy = e1000e_phy_sw_reset,
33569@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33570 .cfg_on_link_up = NULL,
33571 };
33572
33573-static struct e1000_phy_operations e82_phy_ops_bm = {
33574+static const struct e1000_phy_operations e82_phy_ops_bm = {
33575 .acquire_phy = e1000_get_hw_semaphore_82571,
33576 .check_reset_block = e1000e_check_reset_block_generic,
33577 .commit_phy = e1000e_phy_sw_reset,
33578@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33579 .cfg_on_link_up = NULL,
33580 };
33581
33582-static struct e1000_nvm_operations e82571_nvm_ops = {
33583+static const struct e1000_nvm_operations e82571_nvm_ops = {
33584 .acquire_nvm = e1000_acquire_nvm_82571,
33585 .read_nvm = e1000e_read_nvm_eerd,
33586 .release_nvm = e1000_release_nvm_82571,
33587diff -urNp linux-2.6.32.46/drivers/net/e1000e/e1000.h linux-2.6.32.46/drivers/net/e1000e/e1000.h
33588--- linux-2.6.32.46/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33589+++ linux-2.6.32.46/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33590@@ -375,9 +375,9 @@ struct e1000_info {
33591 u32 pba;
33592 u32 max_hw_frame_size;
33593 s32 (*get_variants)(struct e1000_adapter *);
33594- struct e1000_mac_operations *mac_ops;
33595- struct e1000_phy_operations *phy_ops;
33596- struct e1000_nvm_operations *nvm_ops;
33597+ const struct e1000_mac_operations *mac_ops;
33598+ const struct e1000_phy_operations *phy_ops;
33599+ const struct e1000_nvm_operations *nvm_ops;
33600 };
33601
33602 /* hardware capability, feature, and workaround flags */
33603diff -urNp linux-2.6.32.46/drivers/net/e1000e/es2lan.c linux-2.6.32.46/drivers/net/e1000e/es2lan.c
33604--- linux-2.6.32.46/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33605+++ linux-2.6.32.46/drivers/net/e1000e/es2lan.c 2011-08-23 21:22:32.000000000 -0400
33606@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es
33607 {
33608 struct e1000_hw *hw = &adapter->hw;
33609 struct e1000_mac_info *mac = &hw->mac;
33610- struct e1000_mac_operations *func = &mac->ops;
33611+ e1000_mac_operations_no_const *func = &mac->ops;
33612
33613 /* Set media type */
33614 switch (adapter->pdev->device) {
33615@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33616 temp = er32(ICRXDMTC);
33617 }
33618
33619-static struct e1000_mac_operations es2_mac_ops = {
33620+static const struct e1000_mac_operations es2_mac_ops = {
33621 .id_led_init = e1000e_id_led_init,
33622 .check_mng_mode = e1000e_check_mng_mode_generic,
33623 /* check_for_link dependent on media type */
33624@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33625 .setup_led = e1000e_setup_led_generic,
33626 };
33627
33628-static struct e1000_phy_operations es2_phy_ops = {
33629+static const struct e1000_phy_operations es2_phy_ops = {
33630 .acquire_phy = e1000_acquire_phy_80003es2lan,
33631 .check_reset_block = e1000e_check_reset_block_generic,
33632 .commit_phy = e1000e_phy_sw_reset,
33633@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33634 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33635 };
33636
33637-static struct e1000_nvm_operations es2_nvm_ops = {
33638+static const struct e1000_nvm_operations es2_nvm_ops = {
33639 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33640 .read_nvm = e1000e_read_nvm_eerd,
33641 .release_nvm = e1000_release_nvm_80003es2lan,
33642diff -urNp linux-2.6.32.46/drivers/net/e1000e/hw.h linux-2.6.32.46/drivers/net/e1000e/hw.h
33643--- linux-2.6.32.46/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33644+++ linux-2.6.32.46/drivers/net/e1000e/hw.h 2011-08-23 21:27:38.000000000 -0400
33645@@ -753,6 +753,7 @@ struct e1000_mac_operations {
33646 s32 (*setup_physical_interface)(struct e1000_hw *);
33647 s32 (*setup_led)(struct e1000_hw *);
33648 };
33649+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33650
33651 /* Function pointers for the PHY. */
33652 struct e1000_phy_operations {
33653@@ -774,6 +775,7 @@ struct e1000_phy_operations {
33654 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33655 s32 (*cfg_on_link_up)(struct e1000_hw *);
33656 };
33657+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33658
33659 /* Function pointers for the NVM. */
33660 struct e1000_nvm_operations {
33661@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
33662 s32 (*validate_nvm)(struct e1000_hw *);
33663 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33664 };
33665+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33666
33667 struct e1000_mac_info {
33668- struct e1000_mac_operations ops;
33669+ e1000_mac_operations_no_const ops;
33670
33671 u8 addr[6];
33672 u8 perm_addr[6];
33673@@ -823,7 +826,7 @@ struct e1000_mac_info {
33674 };
33675
33676 struct e1000_phy_info {
33677- struct e1000_phy_operations ops;
33678+ e1000_phy_operations_no_const ops;
33679
33680 enum e1000_phy_type type;
33681
33682@@ -857,7 +860,7 @@ struct e1000_phy_info {
33683 };
33684
33685 struct e1000_nvm_info {
33686- struct e1000_nvm_operations ops;
33687+ e1000_nvm_operations_no_const ops;
33688
33689 enum e1000_nvm_type type;
33690 enum e1000_nvm_override override;
33691diff -urNp linux-2.6.32.46/drivers/net/e1000e/ich8lan.c linux-2.6.32.46/drivers/net/e1000e/ich8lan.c
33692--- linux-2.6.32.46/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33693+++ linux-2.6.32.46/drivers/net/e1000e/ich8lan.c 2011-08-23 21:22:32.000000000 -0400
33694@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33695 }
33696 }
33697
33698-static struct e1000_mac_operations ich8_mac_ops = {
33699+static const struct e1000_mac_operations ich8_mac_ops = {
33700 .id_led_init = e1000e_id_led_init,
33701 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33702 .check_for_link = e1000_check_for_copper_link_ich8lan,
33703@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33704 /* id_led_init dependent on mac type */
33705 };
33706
33707-static struct e1000_phy_operations ich8_phy_ops = {
33708+static const struct e1000_phy_operations ich8_phy_ops = {
33709 .acquire_phy = e1000_acquire_swflag_ich8lan,
33710 .check_reset_block = e1000_check_reset_block_ich8lan,
33711 .commit_phy = NULL,
33712@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33713 .write_phy_reg = e1000e_write_phy_reg_igp,
33714 };
33715
33716-static struct e1000_nvm_operations ich8_nvm_ops = {
33717+static const struct e1000_nvm_operations ich8_nvm_ops = {
33718 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33719 .read_nvm = e1000_read_nvm_ich8lan,
33720 .release_nvm = e1000_release_nvm_ich8lan,
33721diff -urNp linux-2.6.32.46/drivers/net/hamradio/6pack.c linux-2.6.32.46/drivers/net/hamradio/6pack.c
33722--- linux-2.6.32.46/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33723+++ linux-2.6.32.46/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33724@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33725 unsigned char buf[512];
33726 int count1;
33727
33728+ pax_track_stack();
33729+
33730 if (!count)
33731 return;
33732
33733diff -urNp linux-2.6.32.46/drivers/net/ibmveth.c linux-2.6.32.46/drivers/net/ibmveth.c
33734--- linux-2.6.32.46/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33735+++ linux-2.6.32.46/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33736@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33737 NULL,
33738 };
33739
33740-static struct sysfs_ops veth_pool_ops = {
33741+static const struct sysfs_ops veth_pool_ops = {
33742 .show = veth_pool_show,
33743 .store = veth_pool_store,
33744 };
33745diff -urNp linux-2.6.32.46/drivers/net/igb/e1000_82575.c linux-2.6.32.46/drivers/net/igb/e1000_82575.c
33746--- linux-2.6.32.46/drivers/net/igb/e1000_82575.c 2011-08-29 22:24:44.000000000 -0400
33747+++ linux-2.6.32.46/drivers/net/igb/e1000_82575.c 2011-08-29 22:25:07.000000000 -0400
33748@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct
33749 wr32(E1000_VT_CTL, vt_ctl);
33750 }
33751
33752-static struct e1000_mac_operations e1000_mac_ops_82575 = {
33753+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
33754 .reset_hw = igb_reset_hw_82575,
33755 .init_hw = igb_init_hw_82575,
33756 .check_for_link = igb_check_for_link_82575,
33757@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000
33758 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
33759 };
33760
33761-static struct e1000_phy_operations e1000_phy_ops_82575 = {
33762+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
33763 .acquire = igb_acquire_phy_82575,
33764 .get_cfg_done = igb_get_cfg_done_82575,
33765 .release = igb_release_phy_82575,
33766 };
33767
33768-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33769+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
33770 .acquire = igb_acquire_nvm_82575,
33771 .read = igb_read_nvm_eerd,
33772 .release = igb_release_nvm_82575,
33773diff -urNp linux-2.6.32.46/drivers/net/igb/e1000_hw.h linux-2.6.32.46/drivers/net/igb/e1000_hw.h
33774--- linux-2.6.32.46/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
33775+++ linux-2.6.32.46/drivers/net/igb/e1000_hw.h 2011-08-23 21:28:01.000000000 -0400
33776@@ -288,6 +288,7 @@ struct e1000_mac_operations {
33777 s32 (*read_mac_addr)(struct e1000_hw *);
33778 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33779 };
33780+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33781
33782 struct e1000_phy_operations {
33783 s32 (*acquire)(struct e1000_hw *);
33784@@ -303,6 +304,7 @@ struct e1000_phy_operations {
33785 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33786 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33787 };
33788+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33789
33790 struct e1000_nvm_operations {
33791 s32 (*acquire)(struct e1000_hw *);
33792@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
33793 void (*release)(struct e1000_hw *);
33794 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33795 };
33796+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33797
33798 struct e1000_info {
33799 s32 (*get_invariants)(struct e1000_hw *);
33800@@ -321,7 +324,7 @@ struct e1000_info {
33801 extern const struct e1000_info e1000_82575_info;
33802
33803 struct e1000_mac_info {
33804- struct e1000_mac_operations ops;
33805+ e1000_mac_operations_no_const ops;
33806
33807 u8 addr[6];
33808 u8 perm_addr[6];
33809@@ -365,7 +368,7 @@ struct e1000_mac_info {
33810 };
33811
33812 struct e1000_phy_info {
33813- struct e1000_phy_operations ops;
33814+ e1000_phy_operations_no_const ops;
33815
33816 enum e1000_phy_type type;
33817
33818@@ -400,7 +403,7 @@ struct e1000_phy_info {
33819 };
33820
33821 struct e1000_nvm_info {
33822- struct e1000_nvm_operations ops;
33823+ e1000_nvm_operations_no_const ops;
33824
33825 enum e1000_nvm_type type;
33826 enum e1000_nvm_override override;
33827@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
33828 s32 (*check_for_ack)(struct e1000_hw *, u16);
33829 s32 (*check_for_rst)(struct e1000_hw *, u16);
33830 };
33831+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33832
33833 struct e1000_mbx_stats {
33834 u32 msgs_tx;
33835@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
33836 };
33837
33838 struct e1000_mbx_info {
33839- struct e1000_mbx_operations ops;
33840+ e1000_mbx_operations_no_const ops;
33841 struct e1000_mbx_stats stats;
33842 u32 timeout;
33843 u32 usec_delay;
33844diff -urNp linux-2.6.32.46/drivers/net/igbvf/vf.h linux-2.6.32.46/drivers/net/igbvf/vf.h
33845--- linux-2.6.32.46/drivers/net/igbvf/vf.h 2011-03-27 14:31:47.000000000 -0400
33846+++ linux-2.6.32.46/drivers/net/igbvf/vf.h 2011-08-23 21:22:38.000000000 -0400
33847@@ -187,9 +187,10 @@ struct e1000_mac_operations {
33848 s32 (*read_mac_addr)(struct e1000_hw *);
33849 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33850 };
33851+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33852
33853 struct e1000_mac_info {
33854- struct e1000_mac_operations ops;
33855+ e1000_mac_operations_no_const ops;
33856 u8 addr[6];
33857 u8 perm_addr[6];
33858
33859@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
33860 s32 (*check_for_ack)(struct e1000_hw *);
33861 s32 (*check_for_rst)(struct e1000_hw *);
33862 };
33863+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33864
33865 struct e1000_mbx_stats {
33866 u32 msgs_tx;
33867@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
33868 };
33869
33870 struct e1000_mbx_info {
33871- struct e1000_mbx_operations ops;
33872+ e1000_mbx_operations_no_const ops;
33873 struct e1000_mbx_stats stats;
33874 u32 timeout;
33875 u32 usec_delay;
33876diff -urNp linux-2.6.32.46/drivers/net/iseries_veth.c linux-2.6.32.46/drivers/net/iseries_veth.c
33877--- linux-2.6.32.46/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
33878+++ linux-2.6.32.46/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
33879@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
33880 NULL
33881 };
33882
33883-static struct sysfs_ops veth_cnx_sysfs_ops = {
33884+static const struct sysfs_ops veth_cnx_sysfs_ops = {
33885 .show = veth_cnx_attribute_show
33886 };
33887
33888@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
33889 NULL
33890 };
33891
33892-static struct sysfs_ops veth_port_sysfs_ops = {
33893+static const struct sysfs_ops veth_port_sysfs_ops = {
33894 .show = veth_port_attribute_show
33895 };
33896
33897diff -urNp linux-2.6.32.46/drivers/net/ixgb/ixgb_main.c linux-2.6.32.46/drivers/net/ixgb/ixgb_main.c
33898--- linux-2.6.32.46/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
33899+++ linux-2.6.32.46/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
33900@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
33901 u32 rctl;
33902 int i;
33903
33904+ pax_track_stack();
33905+
33906 /* Check for Promiscuous and All Multicast modes */
33907
33908 rctl = IXGB_READ_REG(hw, RCTL);
33909diff -urNp linux-2.6.32.46/drivers/net/ixgb/ixgb_param.c linux-2.6.32.46/drivers/net/ixgb/ixgb_param.c
33910--- linux-2.6.32.46/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
33911+++ linux-2.6.32.46/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
33912@@ -260,6 +260,9 @@ void __devinit
33913 ixgb_check_options(struct ixgb_adapter *adapter)
33914 {
33915 int bd = adapter->bd_number;
33916+
33917+ pax_track_stack();
33918+
33919 if (bd >= IXGB_MAX_NIC) {
33920 printk(KERN_NOTICE
33921 "Warning: no configuration for board #%i\n", bd);
33922diff -urNp linux-2.6.32.46/drivers/net/ixgbe/ixgbe_type.h linux-2.6.32.46/drivers/net/ixgbe/ixgbe_type.h
33923--- linux-2.6.32.46/drivers/net/ixgbe/ixgbe_type.h 2011-03-27 14:31:47.000000000 -0400
33924+++ linux-2.6.32.46/drivers/net/ixgbe/ixgbe_type.h 2011-08-23 21:22:38.000000000 -0400
33925@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
33926 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
33927 s32 (*update_checksum)(struct ixgbe_hw *);
33928 };
33929+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33930
33931 struct ixgbe_mac_operations {
33932 s32 (*init_hw)(struct ixgbe_hw *);
33933@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
33934 /* Flow Control */
33935 s32 (*fc_enable)(struct ixgbe_hw *, s32);
33936 };
33937+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33938
33939 struct ixgbe_phy_operations {
33940 s32 (*identify)(struct ixgbe_hw *);
33941@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
33942 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
33943 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33944 };
33945+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33946
33947 struct ixgbe_eeprom_info {
33948- struct ixgbe_eeprom_operations ops;
33949+ ixgbe_eeprom_operations_no_const ops;
33950 enum ixgbe_eeprom_type type;
33951 u32 semaphore_delay;
33952 u16 word_size;
33953@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
33954 };
33955
33956 struct ixgbe_mac_info {
33957- struct ixgbe_mac_operations ops;
33958+ ixgbe_mac_operations_no_const ops;
33959 enum ixgbe_mac_type type;
33960 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33961 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33962@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
33963 };
33964
33965 struct ixgbe_phy_info {
33966- struct ixgbe_phy_operations ops;
33967+ ixgbe_phy_operations_no_const ops;
33968 struct mdio_if_info mdio;
33969 enum ixgbe_phy_type type;
33970 u32 id;
33971diff -urNp linux-2.6.32.46/drivers/net/mlx4/main.c linux-2.6.32.46/drivers/net/mlx4/main.c
33972--- linux-2.6.32.46/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
33973+++ linux-2.6.32.46/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
33974@@ -38,6 +38,7 @@
33975 #include <linux/errno.h>
33976 #include <linux/pci.h>
33977 #include <linux/dma-mapping.h>
33978+#include <linux/sched.h>
33979
33980 #include <linux/mlx4/device.h>
33981 #include <linux/mlx4/doorbell.h>
33982@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
33983 u64 icm_size;
33984 int err;
33985
33986+ pax_track_stack();
33987+
33988 err = mlx4_QUERY_FW(dev);
33989 if (err) {
33990 if (err == -EACCES)
33991diff -urNp linux-2.6.32.46/drivers/net/niu.c linux-2.6.32.46/drivers/net/niu.c
33992--- linux-2.6.32.46/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
33993+++ linux-2.6.32.46/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
33994@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
33995 int i, num_irqs, err;
33996 u8 first_ldg;
33997
33998+ pax_track_stack();
33999+
34000 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34001 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34002 ldg_num_map[i] = first_ldg + i;
34003diff -urNp linux-2.6.32.46/drivers/net/pcnet32.c linux-2.6.32.46/drivers/net/pcnet32.c
34004--- linux-2.6.32.46/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34005+++ linux-2.6.32.46/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34006@@ -79,7 +79,7 @@ static int cards_found;
34007 /*
34008 * VLB I/O addresses
34009 */
34010-static unsigned int pcnet32_portlist[] __initdata =
34011+static unsigned int pcnet32_portlist[] __devinitdata =
34012 { 0x300, 0x320, 0x340, 0x360, 0 };
34013
34014 static int pcnet32_debug = 0;
34015@@ -267,7 +267,7 @@ struct pcnet32_private {
34016 struct sk_buff **rx_skbuff;
34017 dma_addr_t *tx_dma_addr;
34018 dma_addr_t *rx_dma_addr;
34019- struct pcnet32_access a;
34020+ struct pcnet32_access *a;
34021 spinlock_t lock; /* Guard lock */
34022 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34023 unsigned int rx_ring_size; /* current rx ring size */
34024@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34025 u16 val;
34026
34027 netif_wake_queue(dev);
34028- val = lp->a.read_csr(ioaddr, CSR3);
34029+ val = lp->a->read_csr(ioaddr, CSR3);
34030 val &= 0x00ff;
34031- lp->a.write_csr(ioaddr, CSR3, val);
34032+ lp->a->write_csr(ioaddr, CSR3, val);
34033 napi_enable(&lp->napi);
34034 }
34035
34036@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34037 r = mii_link_ok(&lp->mii_if);
34038 } else if (lp->chip_version >= PCNET32_79C970A) {
34039 ulong ioaddr = dev->base_addr; /* card base I/O address */
34040- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34041+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34042 } else { /* can not detect link on really old chips */
34043 r = 1;
34044 }
34045@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34046 pcnet32_netif_stop(dev);
34047
34048 spin_lock_irqsave(&lp->lock, flags);
34049- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34050+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34051
34052 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34053
34054@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34055 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34056 {
34057 struct pcnet32_private *lp = netdev_priv(dev);
34058- struct pcnet32_access *a = &lp->a; /* access to registers */
34059+ struct pcnet32_access *a = lp->a; /* access to registers */
34060 ulong ioaddr = dev->base_addr; /* card base I/O address */
34061 struct sk_buff *skb; /* sk buff */
34062 int x, i; /* counters */
34063@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34064 pcnet32_netif_stop(dev);
34065
34066 spin_lock_irqsave(&lp->lock, flags);
34067- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34068+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34069
34070 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34071
34072 /* Reset the PCNET32 */
34073- lp->a.reset(ioaddr);
34074- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34075+ lp->a->reset(ioaddr);
34076+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34077
34078 /* switch pcnet32 to 32bit mode */
34079- lp->a.write_bcr(ioaddr, 20, 2);
34080+ lp->a->write_bcr(ioaddr, 20, 2);
34081
34082 /* purge & init rings but don't actually restart */
34083 pcnet32_restart(dev, 0x0000);
34084
34085- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34086+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34087
34088 /* Initialize Transmit buffers. */
34089 size = data_len + 15;
34090@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34091
34092 /* set int loopback in CSR15 */
34093 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34094- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34095+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34096
34097 teststatus = cpu_to_le16(0x8000);
34098- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34099+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34100
34101 /* Check status of descriptors */
34102 for (x = 0; x < numbuffs; x++) {
34103@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34104 }
34105 }
34106
34107- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34108+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34109 wmb();
34110 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34111 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34112@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34113 pcnet32_restart(dev, CSR0_NORMAL);
34114 } else {
34115 pcnet32_purge_rx_ring(dev);
34116- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34117+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34118 }
34119 spin_unlock_irqrestore(&lp->lock, flags);
34120
34121@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34122 static void pcnet32_led_blink_callback(struct net_device *dev)
34123 {
34124 struct pcnet32_private *lp = netdev_priv(dev);
34125- struct pcnet32_access *a = &lp->a;
34126+ struct pcnet32_access *a = lp->a;
34127 ulong ioaddr = dev->base_addr;
34128 unsigned long flags;
34129 int i;
34130@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34131 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34132 {
34133 struct pcnet32_private *lp = netdev_priv(dev);
34134- struct pcnet32_access *a = &lp->a;
34135+ struct pcnet32_access *a = lp->a;
34136 ulong ioaddr = dev->base_addr;
34137 unsigned long flags;
34138 int i, regs[4];
34139@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34140 {
34141 int csr5;
34142 struct pcnet32_private *lp = netdev_priv(dev);
34143- struct pcnet32_access *a = &lp->a;
34144+ struct pcnet32_access *a = lp->a;
34145 ulong ioaddr = dev->base_addr;
34146 int ticks;
34147
34148@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34149 spin_lock_irqsave(&lp->lock, flags);
34150 if (pcnet32_tx(dev)) {
34151 /* reset the chip to clear the error condition, then restart */
34152- lp->a.reset(ioaddr);
34153- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34154+ lp->a->reset(ioaddr);
34155+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34156 pcnet32_restart(dev, CSR0_START);
34157 netif_wake_queue(dev);
34158 }
34159@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34160 __napi_complete(napi);
34161
34162 /* clear interrupt masks */
34163- val = lp->a.read_csr(ioaddr, CSR3);
34164+ val = lp->a->read_csr(ioaddr, CSR3);
34165 val &= 0x00ff;
34166- lp->a.write_csr(ioaddr, CSR3, val);
34167+ lp->a->write_csr(ioaddr, CSR3, val);
34168
34169 /* Set interrupt enable. */
34170- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34171+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34172
34173 spin_unlock_irqrestore(&lp->lock, flags);
34174 }
34175@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34176 int i, csr0;
34177 u16 *buff = ptr;
34178 struct pcnet32_private *lp = netdev_priv(dev);
34179- struct pcnet32_access *a = &lp->a;
34180+ struct pcnet32_access *a = lp->a;
34181 ulong ioaddr = dev->base_addr;
34182 unsigned long flags;
34183
34184@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34185 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34186 if (lp->phymask & (1 << j)) {
34187 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34188- lp->a.write_bcr(ioaddr, 33,
34189+ lp->a->write_bcr(ioaddr, 33,
34190 (j << 5) | i);
34191- *buff++ = lp->a.read_bcr(ioaddr, 34);
34192+ *buff++ = lp->a->read_bcr(ioaddr, 34);
34193 }
34194 }
34195 }
34196@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34197 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34198 lp->options |= PCNET32_PORT_FD;
34199
34200- lp->a = *a;
34201+ lp->a = a;
34202
34203 /* prior to register_netdev, dev->name is not yet correct */
34204 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34205@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34206 if (lp->mii) {
34207 /* lp->phycount and lp->phymask are set to 0 by memset above */
34208
34209- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34210+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34211 /* scan for PHYs */
34212 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34213 unsigned short id1, id2;
34214@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34215 "Found PHY %04x:%04x at address %d.\n",
34216 id1, id2, i);
34217 }
34218- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34219+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34220 if (lp->phycount > 1) {
34221 lp->options |= PCNET32_PORT_MII;
34222 }
34223@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34224 }
34225
34226 /* Reset the PCNET32 */
34227- lp->a.reset(ioaddr);
34228+ lp->a->reset(ioaddr);
34229
34230 /* switch pcnet32 to 32bit mode */
34231- lp->a.write_bcr(ioaddr, 20, 2);
34232+ lp->a->write_bcr(ioaddr, 20, 2);
34233
34234 if (netif_msg_ifup(lp))
34235 printk(KERN_DEBUG
34236@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34237 (u32) (lp->init_dma_addr));
34238
34239 /* set/reset autoselect bit */
34240- val = lp->a.read_bcr(ioaddr, 2) & ~2;
34241+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
34242 if (lp->options & PCNET32_PORT_ASEL)
34243 val |= 2;
34244- lp->a.write_bcr(ioaddr, 2, val);
34245+ lp->a->write_bcr(ioaddr, 2, val);
34246
34247 /* handle full duplex setting */
34248 if (lp->mii_if.full_duplex) {
34249- val = lp->a.read_bcr(ioaddr, 9) & ~3;
34250+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
34251 if (lp->options & PCNET32_PORT_FD) {
34252 val |= 1;
34253 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34254@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34255 if (lp->chip_version == 0x2627)
34256 val |= 3;
34257 }
34258- lp->a.write_bcr(ioaddr, 9, val);
34259+ lp->a->write_bcr(ioaddr, 9, val);
34260 }
34261
34262 /* set/reset GPSI bit in test register */
34263- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34264+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34265 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34266 val |= 0x10;
34267- lp->a.write_csr(ioaddr, 124, val);
34268+ lp->a->write_csr(ioaddr, 124, val);
34269
34270 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34271 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34272@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34273 * duplex, and/or enable auto negotiation, and clear DANAS
34274 */
34275 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34276- lp->a.write_bcr(ioaddr, 32,
34277- lp->a.read_bcr(ioaddr, 32) | 0x0080);
34278+ lp->a->write_bcr(ioaddr, 32,
34279+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
34280 /* disable Auto Negotiation, set 10Mpbs, HD */
34281- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34282+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34283 if (lp->options & PCNET32_PORT_FD)
34284 val |= 0x10;
34285 if (lp->options & PCNET32_PORT_100)
34286 val |= 0x08;
34287- lp->a.write_bcr(ioaddr, 32, val);
34288+ lp->a->write_bcr(ioaddr, 32, val);
34289 } else {
34290 if (lp->options & PCNET32_PORT_ASEL) {
34291- lp->a.write_bcr(ioaddr, 32,
34292- lp->a.read_bcr(ioaddr,
34293+ lp->a->write_bcr(ioaddr, 32,
34294+ lp->a->read_bcr(ioaddr,
34295 32) | 0x0080);
34296 /* enable auto negotiate, setup, disable fd */
34297- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34298+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34299 val |= 0x20;
34300- lp->a.write_bcr(ioaddr, 32, val);
34301+ lp->a->write_bcr(ioaddr, 32, val);
34302 }
34303 }
34304 } else {
34305@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34306 * There is really no good other way to handle multiple PHYs
34307 * other than turning off all automatics
34308 */
34309- val = lp->a.read_bcr(ioaddr, 2);
34310- lp->a.write_bcr(ioaddr, 2, val & ~2);
34311- val = lp->a.read_bcr(ioaddr, 32);
34312- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34313+ val = lp->a->read_bcr(ioaddr, 2);
34314+ lp->a->write_bcr(ioaddr, 2, val & ~2);
34315+ val = lp->a->read_bcr(ioaddr, 32);
34316+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34317
34318 if (!(lp->options & PCNET32_PORT_ASEL)) {
34319 /* setup ecmd */
34320@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34321 ecmd.speed =
34322 lp->
34323 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34324- bcr9 = lp->a.read_bcr(ioaddr, 9);
34325+ bcr9 = lp->a->read_bcr(ioaddr, 9);
34326
34327 if (lp->options & PCNET32_PORT_FD) {
34328 ecmd.duplex = DUPLEX_FULL;
34329@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34330 ecmd.duplex = DUPLEX_HALF;
34331 bcr9 |= ~(1 << 0);
34332 }
34333- lp->a.write_bcr(ioaddr, 9, bcr9);
34334+ lp->a->write_bcr(ioaddr, 9, bcr9);
34335 }
34336
34337 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34338@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34339
34340 #ifdef DO_DXSUFLO
34341 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34342- val = lp->a.read_csr(ioaddr, CSR3);
34343+ val = lp->a->read_csr(ioaddr, CSR3);
34344 val |= 0x40;
34345- lp->a.write_csr(ioaddr, CSR3, val);
34346+ lp->a->write_csr(ioaddr, CSR3, val);
34347 }
34348 #endif
34349
34350@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34351 napi_enable(&lp->napi);
34352
34353 /* Re-initialize the PCNET32, and start it when done. */
34354- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34355- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34356+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34357+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34358
34359- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34360- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34361+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34362+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34363
34364 netif_start_queue(dev);
34365
34366@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34367
34368 i = 0;
34369 while (i++ < 100)
34370- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34371+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34372 break;
34373 /*
34374 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34375 * reports that doing so triggers a bug in the '974.
34376 */
34377- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34378+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34379
34380 if (netif_msg_ifup(lp))
34381 printk(KERN_DEBUG
34382 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34383 dev->name, i,
34384 (u32) (lp->init_dma_addr),
34385- lp->a.read_csr(ioaddr, CSR0));
34386+ lp->a->read_csr(ioaddr, CSR0));
34387
34388 spin_unlock_irqrestore(&lp->lock, flags);
34389
34390@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34391 * Switch back to 16bit mode to avoid problems with dumb
34392 * DOS packet driver after a warm reboot
34393 */
34394- lp->a.write_bcr(ioaddr, 20, 4);
34395+ lp->a->write_bcr(ioaddr, 20, 4);
34396
34397 err_free_irq:
34398 spin_unlock_irqrestore(&lp->lock, flags);
34399@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34400
34401 /* wait for stop */
34402 for (i = 0; i < 100; i++)
34403- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34404+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34405 break;
34406
34407 if (i >= 100 && netif_msg_drv(lp))
34408@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34409 return;
34410
34411 /* ReInit Ring */
34412- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34413+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34414 i = 0;
34415 while (i++ < 1000)
34416- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34417+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34418 break;
34419
34420- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34421+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34422 }
34423
34424 static void pcnet32_tx_timeout(struct net_device *dev)
34425@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34426 if (pcnet32_debug & NETIF_MSG_DRV)
34427 printk(KERN_ERR
34428 "%s: transmit timed out, status %4.4x, resetting.\n",
34429- dev->name, lp->a.read_csr(ioaddr, CSR0));
34430- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34431+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34432+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34433 dev->stats.tx_errors++;
34434 if (netif_msg_tx_err(lp)) {
34435 int i;
34436@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34437 if (netif_msg_tx_queued(lp)) {
34438 printk(KERN_DEBUG
34439 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34440- dev->name, lp->a.read_csr(ioaddr, CSR0));
34441+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34442 }
34443
34444 /* Default status -- will not enable Successful-TxDone
34445@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34446 dev->stats.tx_bytes += skb->len;
34447
34448 /* Trigger an immediate send poll. */
34449- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34450+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34451
34452 dev->trans_start = jiffies;
34453
34454@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34455
34456 spin_lock(&lp->lock);
34457
34458- csr0 = lp->a.read_csr(ioaddr, CSR0);
34459+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34460 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34461 if (csr0 == 0xffff) {
34462 break; /* PCMCIA remove happened */
34463 }
34464 /* Acknowledge all of the current interrupt sources ASAP. */
34465- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34466+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34467
34468 if (netif_msg_intr(lp))
34469 printk(KERN_DEBUG
34470 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34471- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34472+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34473
34474 /* Log misc errors. */
34475 if (csr0 & 0x4000)
34476@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34477 if (napi_schedule_prep(&lp->napi)) {
34478 u16 val;
34479 /* set interrupt masks */
34480- val = lp->a.read_csr(ioaddr, CSR3);
34481+ val = lp->a->read_csr(ioaddr, CSR3);
34482 val |= 0x5f00;
34483- lp->a.write_csr(ioaddr, CSR3, val);
34484+ lp->a->write_csr(ioaddr, CSR3, val);
34485
34486 __napi_schedule(&lp->napi);
34487 break;
34488 }
34489- csr0 = lp->a.read_csr(ioaddr, CSR0);
34490+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34491 }
34492
34493 if (netif_msg_intr(lp))
34494 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34495- dev->name, lp->a.read_csr(ioaddr, CSR0));
34496+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34497
34498 spin_unlock(&lp->lock);
34499
34500@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34501
34502 spin_lock_irqsave(&lp->lock, flags);
34503
34504- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34505+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34506
34507 if (netif_msg_ifdown(lp))
34508 printk(KERN_DEBUG
34509 "%s: Shutting down ethercard, status was %2.2x.\n",
34510- dev->name, lp->a.read_csr(ioaddr, CSR0));
34511+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34512
34513 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34514- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34515+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34516
34517 /*
34518 * Switch back to 16bit mode to avoid problems with dumb
34519 * DOS packet driver after a warm reboot
34520 */
34521- lp->a.write_bcr(ioaddr, 20, 4);
34522+ lp->a->write_bcr(ioaddr, 20, 4);
34523
34524 spin_unlock_irqrestore(&lp->lock, flags);
34525
34526@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34527 unsigned long flags;
34528
34529 spin_lock_irqsave(&lp->lock, flags);
34530- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34531+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34532 spin_unlock_irqrestore(&lp->lock, flags);
34533
34534 return &dev->stats;
34535@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34536 if (dev->flags & IFF_ALLMULTI) {
34537 ib->filter[0] = cpu_to_le32(~0U);
34538 ib->filter[1] = cpu_to_le32(~0U);
34539- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34540- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34541- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34542- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34543+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34544+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34545+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34546+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34547 return;
34548 }
34549 /* clear the multicast filter */
34550@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34551 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34552 }
34553 for (i = 0; i < 4; i++)
34554- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34555+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34556 le16_to_cpu(mcast_table[i]));
34557 return;
34558 }
34559@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34560
34561 spin_lock_irqsave(&lp->lock, flags);
34562 suspended = pcnet32_suspend(dev, &flags, 0);
34563- csr15 = lp->a.read_csr(ioaddr, CSR15);
34564+ csr15 = lp->a->read_csr(ioaddr, CSR15);
34565 if (dev->flags & IFF_PROMISC) {
34566 /* Log any net taps. */
34567 if (netif_msg_hw(lp))
34568@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34569 lp->init_block->mode =
34570 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34571 7);
34572- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34573+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34574 } else {
34575 lp->init_block->mode =
34576 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34577- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34578+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34579 pcnet32_load_multicast(dev);
34580 }
34581
34582 if (suspended) {
34583 int csr5;
34584 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34585- csr5 = lp->a.read_csr(ioaddr, CSR5);
34586- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34587+ csr5 = lp->a->read_csr(ioaddr, CSR5);
34588+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34589 } else {
34590- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34591+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34592 pcnet32_restart(dev, CSR0_NORMAL);
34593 netif_wake_queue(dev);
34594 }
34595@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34596 if (!lp->mii)
34597 return 0;
34598
34599- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34600- val_out = lp->a.read_bcr(ioaddr, 34);
34601+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34602+ val_out = lp->a->read_bcr(ioaddr, 34);
34603
34604 return val_out;
34605 }
34606@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34607 if (!lp->mii)
34608 return;
34609
34610- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34611- lp->a.write_bcr(ioaddr, 34, val);
34612+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34613+ lp->a->write_bcr(ioaddr, 34, val);
34614 }
34615
34616 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34617@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
34618 curr_link = mii_link_ok(&lp->mii_if);
34619 } else {
34620 ulong ioaddr = dev->base_addr; /* card base I/O address */
34621- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34622+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34623 }
34624 if (!curr_link) {
34625 if (prev_link || verbose) {
34626@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
34627 (ecmd.duplex ==
34628 DUPLEX_FULL) ? "full" : "half");
34629 }
34630- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34631+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34632 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34633 if (lp->mii_if.full_duplex)
34634 bcr9 |= (1 << 0);
34635 else
34636 bcr9 &= ~(1 << 0);
34637- lp->a.write_bcr(dev->base_addr, 9, bcr9);
34638+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
34639 }
34640 } else {
34641 if (netif_msg_link(lp))
34642diff -urNp linux-2.6.32.46/drivers/net/tg3.h linux-2.6.32.46/drivers/net/tg3.h
34643--- linux-2.6.32.46/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
34644+++ linux-2.6.32.46/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
34645@@ -95,6 +95,7 @@
34646 #define CHIPREV_ID_5750_A0 0x4000
34647 #define CHIPREV_ID_5750_A1 0x4001
34648 #define CHIPREV_ID_5750_A3 0x4003
34649+#define CHIPREV_ID_5750_C1 0x4201
34650 #define CHIPREV_ID_5750_C2 0x4202
34651 #define CHIPREV_ID_5752_A0_HW 0x5000
34652 #define CHIPREV_ID_5752_A0 0x6000
34653diff -urNp linux-2.6.32.46/drivers/net/tokenring/abyss.c linux-2.6.32.46/drivers/net/tokenring/abyss.c
34654--- linux-2.6.32.46/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
34655+++ linux-2.6.32.46/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
34656@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
34657
34658 static int __init abyss_init (void)
34659 {
34660- abyss_netdev_ops = tms380tr_netdev_ops;
34661+ pax_open_kernel();
34662+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34663
34664- abyss_netdev_ops.ndo_open = abyss_open;
34665- abyss_netdev_ops.ndo_stop = abyss_close;
34666+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34667+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34668+ pax_close_kernel();
34669
34670 return pci_register_driver(&abyss_driver);
34671 }
34672diff -urNp linux-2.6.32.46/drivers/net/tokenring/madgemc.c linux-2.6.32.46/drivers/net/tokenring/madgemc.c
34673--- linux-2.6.32.46/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
34674+++ linux-2.6.32.46/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
34675@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
34676
34677 static int __init madgemc_init (void)
34678 {
34679- madgemc_netdev_ops = tms380tr_netdev_ops;
34680- madgemc_netdev_ops.ndo_open = madgemc_open;
34681- madgemc_netdev_ops.ndo_stop = madgemc_close;
34682+ pax_open_kernel();
34683+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34684+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34685+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34686+ pax_close_kernel();
34687
34688 return mca_register_driver (&madgemc_driver);
34689 }
34690diff -urNp linux-2.6.32.46/drivers/net/tokenring/proteon.c linux-2.6.32.46/drivers/net/tokenring/proteon.c
34691--- linux-2.6.32.46/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
34692+++ linux-2.6.32.46/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
34693@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34694 struct platform_device *pdev;
34695 int i, num = 0, err = 0;
34696
34697- proteon_netdev_ops = tms380tr_netdev_ops;
34698- proteon_netdev_ops.ndo_open = proteon_open;
34699- proteon_netdev_ops.ndo_stop = tms380tr_close;
34700+ pax_open_kernel();
34701+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34702+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34703+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34704+ pax_close_kernel();
34705
34706 err = platform_driver_register(&proteon_driver);
34707 if (err)
34708diff -urNp linux-2.6.32.46/drivers/net/tokenring/skisa.c linux-2.6.32.46/drivers/net/tokenring/skisa.c
34709--- linux-2.6.32.46/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
34710+++ linux-2.6.32.46/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
34711@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34712 struct platform_device *pdev;
34713 int i, num = 0, err = 0;
34714
34715- sk_isa_netdev_ops = tms380tr_netdev_ops;
34716- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34717- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34718+ pax_open_kernel();
34719+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34720+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34721+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34722+ pax_close_kernel();
34723
34724 err = platform_driver_register(&sk_isa_driver);
34725 if (err)
34726diff -urNp linux-2.6.32.46/drivers/net/tulip/de2104x.c linux-2.6.32.46/drivers/net/tulip/de2104x.c
34727--- linux-2.6.32.46/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
34728+++ linux-2.6.32.46/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
34729@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
34730 struct de_srom_info_leaf *il;
34731 void *bufp;
34732
34733+ pax_track_stack();
34734+
34735 /* download entire eeprom */
34736 for (i = 0; i < DE_EEPROM_WORDS; i++)
34737 ((__le16 *)ee_data)[i] =
34738diff -urNp linux-2.6.32.46/drivers/net/tulip/de4x5.c linux-2.6.32.46/drivers/net/tulip/de4x5.c
34739--- linux-2.6.32.46/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
34740+++ linux-2.6.32.46/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
34741@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
34742 for (i=0; i<ETH_ALEN; i++) {
34743 tmp.addr[i] = dev->dev_addr[i];
34744 }
34745- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34746+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34747 break;
34748
34749 case DE4X5_SET_HWADDR: /* Set the hardware address */
34750@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
34751 spin_lock_irqsave(&lp->lock, flags);
34752 memcpy(&statbuf, &lp->pktStats, ioc->len);
34753 spin_unlock_irqrestore(&lp->lock, flags);
34754- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34755+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34756 return -EFAULT;
34757 break;
34758 }
34759diff -urNp linux-2.6.32.46/drivers/net/usb/hso.c linux-2.6.32.46/drivers/net/usb/hso.c
34760--- linux-2.6.32.46/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
34761+++ linux-2.6.32.46/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
34762@@ -71,7 +71,7 @@
34763 #include <asm/byteorder.h>
34764 #include <linux/serial_core.h>
34765 #include <linux/serial.h>
34766-
34767+#include <asm/local.h>
34768
34769 #define DRIVER_VERSION "1.2"
34770 #define MOD_AUTHOR "Option Wireless"
34771@@ -258,7 +258,7 @@ struct hso_serial {
34772
34773 /* from usb_serial_port */
34774 struct tty_struct *tty;
34775- int open_count;
34776+ local_t open_count;
34777 spinlock_t serial_lock;
34778
34779 int (*write_data) (struct hso_serial *serial);
34780@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
34781 struct urb *urb;
34782
34783 urb = serial->rx_urb[0];
34784- if (serial->open_count > 0) {
34785+ if (local_read(&serial->open_count) > 0) {
34786 count = put_rxbuf_data(urb, serial);
34787 if (count == -1)
34788 return;
34789@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
34790 DUMP1(urb->transfer_buffer, urb->actual_length);
34791
34792 /* Anyone listening? */
34793- if (serial->open_count == 0)
34794+ if (local_read(&serial->open_count) == 0)
34795 return;
34796
34797 if (status == 0) {
34798@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
34799 spin_unlock_irq(&serial->serial_lock);
34800
34801 /* check for port already opened, if not set the termios */
34802- serial->open_count++;
34803- if (serial->open_count == 1) {
34804+ if (local_inc_return(&serial->open_count) == 1) {
34805 tty->low_latency = 1;
34806 serial->rx_state = RX_IDLE;
34807 /* Force default termio settings */
34808@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
34809 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34810 if (result) {
34811 hso_stop_serial_device(serial->parent);
34812- serial->open_count--;
34813+ local_dec(&serial->open_count);
34814 kref_put(&serial->parent->ref, hso_serial_ref_free);
34815 }
34816 } else {
34817@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
34818
34819 /* reset the rts and dtr */
34820 /* do the actual close */
34821- serial->open_count--;
34822+ local_dec(&serial->open_count);
34823
34824- if (serial->open_count <= 0) {
34825- serial->open_count = 0;
34826+ if (local_read(&serial->open_count) <= 0) {
34827+ local_set(&serial->open_count, 0);
34828 spin_lock_irq(&serial->serial_lock);
34829 if (serial->tty == tty) {
34830 serial->tty->driver_data = NULL;
34831@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
34832
34833 /* the actual setup */
34834 spin_lock_irqsave(&serial->serial_lock, flags);
34835- if (serial->open_count)
34836+ if (local_read(&serial->open_count))
34837 _hso_serial_set_termios(tty, old);
34838 else
34839 tty->termios = old;
34840@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
34841 /* Start all serial ports */
34842 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34843 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34844- if (dev2ser(serial_table[i])->open_count) {
34845+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
34846 result =
34847 hso_start_serial_device(serial_table[i], GFP_NOIO);
34848 hso_kick_transmit(dev2ser(serial_table[i]));
34849diff -urNp linux-2.6.32.46/drivers/net/vxge/vxge-config.h linux-2.6.32.46/drivers/net/vxge/vxge-config.h
34850--- linux-2.6.32.46/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
34851+++ linux-2.6.32.46/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
34852@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
34853 void (*link_down)(struct __vxge_hw_device *devh);
34854 void (*crit_err)(struct __vxge_hw_device *devh,
34855 enum vxge_hw_event type, u64 ext_data);
34856-};
34857+} __no_const;
34858
34859 /*
34860 * struct __vxge_hw_blockpool_entry - Block private data structure
34861diff -urNp linux-2.6.32.46/drivers/net/vxge/vxge-main.c linux-2.6.32.46/drivers/net/vxge/vxge-main.c
34862--- linux-2.6.32.46/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
34863+++ linux-2.6.32.46/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
34864@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
34865 struct sk_buff *completed[NR_SKB_COMPLETED];
34866 int more;
34867
34868+ pax_track_stack();
34869+
34870 do {
34871 more = 0;
34872 skb_ptr = completed;
34873@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
34874 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34875 int index;
34876
34877+ pax_track_stack();
34878+
34879 /*
34880 * Filling
34881 * - itable with bucket numbers
34882diff -urNp linux-2.6.32.46/drivers/net/vxge/vxge-traffic.h linux-2.6.32.46/drivers/net/vxge/vxge-traffic.h
34883--- linux-2.6.32.46/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
34884+++ linux-2.6.32.46/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
34885@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
34886 struct vxge_hw_mempool_dma *dma_object,
34887 u32 index,
34888 u32 is_last);
34889-};
34890+} __no_const;
34891
34892 void
34893 __vxge_hw_mempool_destroy(
34894diff -urNp linux-2.6.32.46/drivers/net/wan/cycx_x25.c linux-2.6.32.46/drivers/net/wan/cycx_x25.c
34895--- linux-2.6.32.46/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
34896+++ linux-2.6.32.46/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
34897@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
34898 unsigned char hex[1024],
34899 * phex = hex;
34900
34901+ pax_track_stack();
34902+
34903 if (len >= (sizeof(hex) / 2))
34904 len = (sizeof(hex) / 2) - 1;
34905
34906diff -urNp linux-2.6.32.46/drivers/net/wan/hdlc_x25.c linux-2.6.32.46/drivers/net/wan/hdlc_x25.c
34907--- linux-2.6.32.46/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
34908+++ linux-2.6.32.46/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
34909@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
34910
34911 static int x25_open(struct net_device *dev)
34912 {
34913- struct lapb_register_struct cb;
34914+ static struct lapb_register_struct cb = {
34915+ .connect_confirmation = x25_connected,
34916+ .connect_indication = x25_connected,
34917+ .disconnect_confirmation = x25_disconnected,
34918+ .disconnect_indication = x25_disconnected,
34919+ .data_indication = x25_data_indication,
34920+ .data_transmit = x25_data_transmit
34921+ };
34922 int result;
34923
34924- cb.connect_confirmation = x25_connected;
34925- cb.connect_indication = x25_connected;
34926- cb.disconnect_confirmation = x25_disconnected;
34927- cb.disconnect_indication = x25_disconnected;
34928- cb.data_indication = x25_data_indication;
34929- cb.data_transmit = x25_data_transmit;
34930-
34931 result = lapb_register(dev, &cb);
34932 if (result != LAPB_OK)
34933 return result;
34934diff -urNp linux-2.6.32.46/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.46/drivers/net/wimax/i2400m/usb-fw.c
34935--- linux-2.6.32.46/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
34936+++ linux-2.6.32.46/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
34937@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
34938 int do_autopm = 1;
34939 DECLARE_COMPLETION_ONSTACK(notif_completion);
34940
34941+ pax_track_stack();
34942+
34943 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34944 i2400m, ack, ack_size);
34945 BUG_ON(_ack == i2400m->bm_ack_buf);
34946diff -urNp linux-2.6.32.46/drivers/net/wireless/airo.c linux-2.6.32.46/drivers/net/wireless/airo.c
34947--- linux-2.6.32.46/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
34948+++ linux-2.6.32.46/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
34949@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
34950 BSSListElement * loop_net;
34951 BSSListElement * tmp_net;
34952
34953+ pax_track_stack();
34954+
34955 /* Blow away current list of scan results */
34956 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34957 list_move_tail (&loop_net->list, &ai->network_free_list);
34958@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
34959 WepKeyRid wkr;
34960 int rc;
34961
34962+ pax_track_stack();
34963+
34964 memset( &mySsid, 0, sizeof( mySsid ) );
34965 kfree (ai->flash);
34966 ai->flash = NULL;
34967@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
34968 __le32 *vals = stats.vals;
34969 int len;
34970
34971+ pax_track_stack();
34972+
34973 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34974 return -ENOMEM;
34975 data = (struct proc_data *)file->private_data;
34976@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
34977 /* If doLoseSync is not 1, we won't do a Lose Sync */
34978 int doLoseSync = -1;
34979
34980+ pax_track_stack();
34981+
34982 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34983 return -ENOMEM;
34984 data = (struct proc_data *)file->private_data;
34985@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
34986 int i;
34987 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34988
34989+ pax_track_stack();
34990+
34991 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34992 if (!qual)
34993 return -ENOMEM;
34994@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
34995 CapabilityRid cap_rid;
34996 __le32 *vals = stats_rid.vals;
34997
34998+ pax_track_stack();
34999+
35000 /* Get stats out of the card */
35001 clear_bit(JOB_WSTATS, &local->jobs);
35002 if (local->power.event) {
35003diff -urNp linux-2.6.32.46/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.46/drivers/net/wireless/ath/ath5k/debug.c
35004--- linux-2.6.32.46/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35005+++ linux-2.6.32.46/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35006@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35007 unsigned int v;
35008 u64 tsf;
35009
35010+ pax_track_stack();
35011+
35012 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35013 len += snprintf(buf+len, sizeof(buf)-len,
35014 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35015@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35016 unsigned int len = 0;
35017 unsigned int i;
35018
35019+ pax_track_stack();
35020+
35021 len += snprintf(buf+len, sizeof(buf)-len,
35022 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35023
35024diff -urNp linux-2.6.32.46/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.46/drivers/net/wireless/ath/ath9k/debug.c
35025--- linux-2.6.32.46/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35026+++ linux-2.6.32.46/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35027@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35028 char buf[512];
35029 unsigned int len = 0;
35030
35031+ pax_track_stack();
35032+
35033 len += snprintf(buf + len, sizeof(buf) - len,
35034 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35035 len += snprintf(buf + len, sizeof(buf) - len,
35036@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35037 int i;
35038 u8 addr[ETH_ALEN];
35039
35040+ pax_track_stack();
35041+
35042 len += snprintf(buf + len, sizeof(buf) - len,
35043 "primary: %s (%s chan=%d ht=%d)\n",
35044 wiphy_name(sc->pri_wiphy->hw->wiphy),
35045diff -urNp linux-2.6.32.46/drivers/net/wireless/b43/debugfs.c linux-2.6.32.46/drivers/net/wireless/b43/debugfs.c
35046--- linux-2.6.32.46/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35047+++ linux-2.6.32.46/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35048@@ -43,7 +43,7 @@ static struct dentry *rootdir;
35049 struct b43_debugfs_fops {
35050 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35051 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35052- struct file_operations fops;
35053+ const struct file_operations fops;
35054 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35055 size_t file_struct_offset;
35056 };
35057diff -urNp linux-2.6.32.46/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.46/drivers/net/wireless/b43legacy/debugfs.c
35058--- linux-2.6.32.46/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35059+++ linux-2.6.32.46/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35060@@ -44,7 +44,7 @@ static struct dentry *rootdir;
35061 struct b43legacy_debugfs_fops {
35062 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35063 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35064- struct file_operations fops;
35065+ const struct file_operations fops;
35066 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35067 size_t file_struct_offset;
35068 /* Take wl->irq_lock before calling read/write? */
35069diff -urNp linux-2.6.32.46/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.46/drivers/net/wireless/ipw2x00/ipw2100.c
35070--- linux-2.6.32.46/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35071+++ linux-2.6.32.46/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35072@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35073 int err;
35074 DECLARE_SSID_BUF(ssid);
35075
35076+ pax_track_stack();
35077+
35078 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35079
35080 if (ssid_len)
35081@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35082 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35083 int err;
35084
35085+ pax_track_stack();
35086+
35087 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35088 idx, keylen, len);
35089
35090diff -urNp linux-2.6.32.46/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.46/drivers/net/wireless/ipw2x00/libipw_rx.c
35091--- linux-2.6.32.46/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35092+++ linux-2.6.32.46/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35093@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35094 unsigned long flags;
35095 DECLARE_SSID_BUF(ssid);
35096
35097+ pax_track_stack();
35098+
35099 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35100 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35101 print_ssid(ssid, info_element->data, info_element->len),
35102diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-1000.c
35103--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35104+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35105@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35106 },
35107 };
35108
35109-static struct iwl_ops iwl1000_ops = {
35110+static const struct iwl_ops iwl1000_ops = {
35111 .ucode = &iwl5000_ucode,
35112 .lib = &iwl1000_lib,
35113 .hcmd = &iwl5000_hcmd,
35114diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl3945-base.c
35115--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35116+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35117@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35118 */
35119 if (iwl3945_mod_params.disable_hw_scan) {
35120 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35121- iwl3945_hw_ops.hw_scan = NULL;
35122+ pax_open_kernel();
35123+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35124+ pax_close_kernel();
35125 }
35126
35127
35128diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-3945.c
35129--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35130+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35131@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35132 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35133 };
35134
35135-static struct iwl_ops iwl3945_ops = {
35136+static const struct iwl_ops iwl3945_ops = {
35137 .ucode = &iwl3945_ucode,
35138 .lib = &iwl3945_lib,
35139 .hcmd = &iwl3945_hcmd,
35140diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-4965.c
35141--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35142+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35143@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35144 },
35145 };
35146
35147-static struct iwl_ops iwl4965_ops = {
35148+static const struct iwl_ops iwl4965_ops = {
35149 .ucode = &iwl4965_ucode,
35150 .lib = &iwl4965_lib,
35151 .hcmd = &iwl4965_hcmd,
35152diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-5000.c
35153--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35154+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35155@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35156 },
35157 };
35158
35159-struct iwl_ops iwl5000_ops = {
35160+const struct iwl_ops iwl5000_ops = {
35161 .ucode = &iwl5000_ucode,
35162 .lib = &iwl5000_lib,
35163 .hcmd = &iwl5000_hcmd,
35164 .utils = &iwl5000_hcmd_utils,
35165 };
35166
35167-static struct iwl_ops iwl5150_ops = {
35168+static const struct iwl_ops iwl5150_ops = {
35169 .ucode = &iwl5000_ucode,
35170 .lib = &iwl5150_lib,
35171 .hcmd = &iwl5000_hcmd,
35172diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-6000.c
35173--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35174+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35175@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35176 .calc_rssi = iwl5000_calc_rssi,
35177 };
35178
35179-static struct iwl_ops iwl6000_ops = {
35180+static const struct iwl_ops iwl6000_ops = {
35181 .ucode = &iwl5000_ucode,
35182 .lib = &iwl6000_lib,
35183 .hcmd = &iwl5000_hcmd,
35184diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn.c
35185--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35186+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35187@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35188 if (iwl_debug_level & IWL_DL_INFO)
35189 dev_printk(KERN_DEBUG, &(pdev->dev),
35190 "Disabling hw_scan\n");
35191- iwl_hw_ops.hw_scan = NULL;
35192+ pax_open_kernel();
35193+ *(void **)&iwl_hw_ops.hw_scan = NULL;
35194+ pax_close_kernel();
35195 }
35196
35197 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35198diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35199--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35200+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35201@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35202 u8 active_index = 0;
35203 s32 tpt = 0;
35204
35205+ pax_track_stack();
35206+
35207 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35208
35209 if (!ieee80211_is_data(hdr->frame_control) ||
35210@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35211 u8 valid_tx_ant = 0;
35212 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35213
35214+ pax_track_stack();
35215+
35216 /* Override starting rate (index 0) if needed for debug purposes */
35217 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35218
35219diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35220--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35221+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35222@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35223 int pos = 0;
35224 const size_t bufsz = sizeof(buf);
35225
35226+ pax_track_stack();
35227+
35228 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35229 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35230 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35231@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35232 const size_t bufsz = sizeof(buf);
35233 ssize_t ret;
35234
35235+ pax_track_stack();
35236+
35237 for (i = 0; i < AC_NUM; i++) {
35238 pos += scnprintf(buf + pos, bufsz - pos,
35239 "\tcw_min\tcw_max\taifsn\ttxop\n");
35240diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debug.h
35241--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35242+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35243@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35244 #endif
35245
35246 #else
35247-#define IWL_DEBUG(__priv, level, fmt, args...)
35248-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35249+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35250+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35251 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35252 void *p, u32 len)
35253 {}
35254diff -urNp linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-dev.h
35255--- linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35256+++ linux-2.6.32.46/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35257@@ -68,7 +68,7 @@ struct iwl_tx_queue;
35258
35259 /* shared structures from iwl-5000.c */
35260 extern struct iwl_mod_params iwl50_mod_params;
35261-extern struct iwl_ops iwl5000_ops;
35262+extern const struct iwl_ops iwl5000_ops;
35263 extern struct iwl_ucode_ops iwl5000_ucode;
35264 extern struct iwl_lib_ops iwl5000_lib;
35265 extern struct iwl_hcmd_ops iwl5000_hcmd;
35266diff -urNp linux-2.6.32.46/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.46/drivers/net/wireless/iwmc3200wifi/debugfs.c
35267--- linux-2.6.32.46/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35268+++ linux-2.6.32.46/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35269@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35270 int buf_len = 512;
35271 size_t len = 0;
35272
35273+ pax_track_stack();
35274+
35275 if (*ppos != 0)
35276 return 0;
35277 if (count < sizeof(buf))
35278diff -urNp linux-2.6.32.46/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.46/drivers/net/wireless/libertas/debugfs.c
35279--- linux-2.6.32.46/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35280+++ linux-2.6.32.46/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35281@@ -708,7 +708,7 @@ out_unlock:
35282 struct lbs_debugfs_files {
35283 const char *name;
35284 int perm;
35285- struct file_operations fops;
35286+ const struct file_operations fops;
35287 };
35288
35289 static const struct lbs_debugfs_files debugfs_files[] = {
35290diff -urNp linux-2.6.32.46/drivers/net/wireless/rndis_wlan.c linux-2.6.32.46/drivers/net/wireless/rndis_wlan.c
35291--- linux-2.6.32.46/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35292+++ linux-2.6.32.46/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35293@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35294
35295 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35296
35297- if (rts_threshold < 0 || rts_threshold > 2347)
35298+ if (rts_threshold > 2347)
35299 rts_threshold = 2347;
35300
35301 tmp = cpu_to_le32(rts_threshold);
35302diff -urNp linux-2.6.32.46/drivers/oprofile/buffer_sync.c linux-2.6.32.46/drivers/oprofile/buffer_sync.c
35303--- linux-2.6.32.46/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35304+++ linux-2.6.32.46/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35305@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35306 if (cookie == NO_COOKIE)
35307 offset = pc;
35308 if (cookie == INVALID_COOKIE) {
35309- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35310+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35311 offset = pc;
35312 }
35313 if (cookie != last_cookie) {
35314@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35315 /* add userspace sample */
35316
35317 if (!mm) {
35318- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35319+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35320 return 0;
35321 }
35322
35323 cookie = lookup_dcookie(mm, s->eip, &offset);
35324
35325 if (cookie == INVALID_COOKIE) {
35326- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35327+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35328 return 0;
35329 }
35330
35331@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35332 /* ignore backtraces if failed to add a sample */
35333 if (state == sb_bt_start) {
35334 state = sb_bt_ignore;
35335- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35336+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35337 }
35338 }
35339 release_mm(mm);
35340diff -urNp linux-2.6.32.46/drivers/oprofile/event_buffer.c linux-2.6.32.46/drivers/oprofile/event_buffer.c
35341--- linux-2.6.32.46/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35342+++ linux-2.6.32.46/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35343@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35344 }
35345
35346 if (buffer_pos == buffer_size) {
35347- atomic_inc(&oprofile_stats.event_lost_overflow);
35348+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35349 return;
35350 }
35351
35352diff -urNp linux-2.6.32.46/drivers/oprofile/oprof.c linux-2.6.32.46/drivers/oprofile/oprof.c
35353--- linux-2.6.32.46/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35354+++ linux-2.6.32.46/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35355@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35356 if (oprofile_ops.switch_events())
35357 return;
35358
35359- atomic_inc(&oprofile_stats.multiplex_counter);
35360+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35361 start_switch_worker();
35362 }
35363
35364diff -urNp linux-2.6.32.46/drivers/oprofile/oprofilefs.c linux-2.6.32.46/drivers/oprofile/oprofilefs.c
35365--- linux-2.6.32.46/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35366+++ linux-2.6.32.46/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35367@@ -187,7 +187,7 @@ static const struct file_operations atom
35368
35369
35370 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35371- char const *name, atomic_t *val)
35372+ char const *name, atomic_unchecked_t *val)
35373 {
35374 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35375 &atomic_ro_fops, 0444);
35376diff -urNp linux-2.6.32.46/drivers/oprofile/oprofile_stats.c linux-2.6.32.46/drivers/oprofile/oprofile_stats.c
35377--- linux-2.6.32.46/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35378+++ linux-2.6.32.46/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35379@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35380 cpu_buf->sample_invalid_eip = 0;
35381 }
35382
35383- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35384- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35385- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35386- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35387- atomic_set(&oprofile_stats.multiplex_counter, 0);
35388+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35389+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35390+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35391+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35392+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35393 }
35394
35395
35396diff -urNp linux-2.6.32.46/drivers/oprofile/oprofile_stats.h linux-2.6.32.46/drivers/oprofile/oprofile_stats.h
35397--- linux-2.6.32.46/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35398+++ linux-2.6.32.46/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35399@@ -13,11 +13,11 @@
35400 #include <asm/atomic.h>
35401
35402 struct oprofile_stat_struct {
35403- atomic_t sample_lost_no_mm;
35404- atomic_t sample_lost_no_mapping;
35405- atomic_t bt_lost_no_mapping;
35406- atomic_t event_lost_overflow;
35407- atomic_t multiplex_counter;
35408+ atomic_unchecked_t sample_lost_no_mm;
35409+ atomic_unchecked_t sample_lost_no_mapping;
35410+ atomic_unchecked_t bt_lost_no_mapping;
35411+ atomic_unchecked_t event_lost_overflow;
35412+ atomic_unchecked_t multiplex_counter;
35413 };
35414
35415 extern struct oprofile_stat_struct oprofile_stats;
35416diff -urNp linux-2.6.32.46/drivers/parisc/pdc_stable.c linux-2.6.32.46/drivers/parisc/pdc_stable.c
35417--- linux-2.6.32.46/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35418+++ linux-2.6.32.46/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35419@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35420 return ret;
35421 }
35422
35423-static struct sysfs_ops pdcspath_attr_ops = {
35424+static const struct sysfs_ops pdcspath_attr_ops = {
35425 .show = pdcspath_attr_show,
35426 .store = pdcspath_attr_store,
35427 };
35428diff -urNp linux-2.6.32.46/drivers/parport/procfs.c linux-2.6.32.46/drivers/parport/procfs.c
35429--- linux-2.6.32.46/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35430+++ linux-2.6.32.46/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35431@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35432
35433 *ppos += len;
35434
35435- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35436+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35437 }
35438
35439 #ifdef CONFIG_PARPORT_1284
35440@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35441
35442 *ppos += len;
35443
35444- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35445+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35446 }
35447 #endif /* IEEE1284.3 support. */
35448
35449diff -urNp linux-2.6.32.46/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.46/drivers/pci/hotplug/acpiphp_glue.c
35450--- linux-2.6.32.46/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35451+++ linux-2.6.32.46/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35452@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35453 }
35454
35455
35456-static struct acpi_dock_ops acpiphp_dock_ops = {
35457+static const struct acpi_dock_ops acpiphp_dock_ops = {
35458 .handler = handle_hotplug_event_func,
35459 };
35460
35461diff -urNp linux-2.6.32.46/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.46/drivers/pci/hotplug/cpci_hotplug.h
35462--- linux-2.6.32.46/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35463+++ linux-2.6.32.46/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35464@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35465 int (*hardware_test) (struct slot* slot, u32 value);
35466 u8 (*get_power) (struct slot* slot);
35467 int (*set_power) (struct slot* slot, int value);
35468-};
35469+} __no_const;
35470
35471 struct cpci_hp_controller {
35472 unsigned int irq;
35473diff -urNp linux-2.6.32.46/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.46/drivers/pci/hotplug/cpqphp_nvram.c
35474--- linux-2.6.32.46/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35475+++ linux-2.6.32.46/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35476@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35477
35478 void compaq_nvram_init (void __iomem *rom_start)
35479 {
35480+
35481+#ifndef CONFIG_PAX_KERNEXEC
35482 if (rom_start) {
35483 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35484 }
35485+#endif
35486+
35487 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35488
35489 /* initialize our int15 lock */
35490diff -urNp linux-2.6.32.46/drivers/pci/hotplug/fakephp.c linux-2.6.32.46/drivers/pci/hotplug/fakephp.c
35491--- linux-2.6.32.46/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35492+++ linux-2.6.32.46/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35493@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35494 }
35495
35496 static struct kobj_type legacy_ktype = {
35497- .sysfs_ops = &(struct sysfs_ops){
35498+ .sysfs_ops = &(const struct sysfs_ops){
35499 .store = legacy_store, .show = legacy_show
35500 },
35501 .release = &legacy_release,
35502diff -urNp linux-2.6.32.46/drivers/pci/intel-iommu.c linux-2.6.32.46/drivers/pci/intel-iommu.c
35503--- linux-2.6.32.46/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35504+++ linux-2.6.32.46/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35505@@ -2643,7 +2643,7 @@ error:
35506 return 0;
35507 }
35508
35509-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35510+dma_addr_t intel_map_page(struct device *dev, struct page *page,
35511 unsigned long offset, size_t size,
35512 enum dma_data_direction dir,
35513 struct dma_attrs *attrs)
35514@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35515 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35516 }
35517
35518-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35519+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35520 size_t size, enum dma_data_direction dir,
35521 struct dma_attrs *attrs)
35522 {
35523@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35524 }
35525 }
35526
35527-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35528+void *intel_alloc_coherent(struct device *hwdev, size_t size,
35529 dma_addr_t *dma_handle, gfp_t flags)
35530 {
35531 void *vaddr;
35532@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35533 return NULL;
35534 }
35535
35536-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35537+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35538 dma_addr_t dma_handle)
35539 {
35540 int order;
35541@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35542 free_pages((unsigned long)vaddr, order);
35543 }
35544
35545-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35546+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35547 int nelems, enum dma_data_direction dir,
35548 struct dma_attrs *attrs)
35549 {
35550@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35551 return nelems;
35552 }
35553
35554-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35555+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35556 enum dma_data_direction dir, struct dma_attrs *attrs)
35557 {
35558 int i;
35559@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35560 return nelems;
35561 }
35562
35563-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35564+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35565 {
35566 return !dma_addr;
35567 }
35568
35569-struct dma_map_ops intel_dma_ops = {
35570+const struct dma_map_ops intel_dma_ops = {
35571 .alloc_coherent = intel_alloc_coherent,
35572 .free_coherent = intel_free_coherent,
35573 .map_sg = intel_map_sg,
35574diff -urNp linux-2.6.32.46/drivers/pci/pcie/aspm.c linux-2.6.32.46/drivers/pci/pcie/aspm.c
35575--- linux-2.6.32.46/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35576+++ linux-2.6.32.46/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35577@@ -27,9 +27,9 @@
35578 #define MODULE_PARAM_PREFIX "pcie_aspm."
35579
35580 /* Note: those are not register definitions */
35581-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35582-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35583-#define ASPM_STATE_L1 (4) /* L1 state */
35584+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35585+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35586+#define ASPM_STATE_L1 (4U) /* L1 state */
35587 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35588 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35589
35590diff -urNp linux-2.6.32.46/drivers/pci/probe.c linux-2.6.32.46/drivers/pci/probe.c
35591--- linux-2.6.32.46/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35592+++ linux-2.6.32.46/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35593@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35594 return ret;
35595 }
35596
35597-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35598+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35599 struct device_attribute *attr,
35600 char *buf)
35601 {
35602 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35603 }
35604
35605-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35606+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35607 struct device_attribute *attr,
35608 char *buf)
35609 {
35610diff -urNp linux-2.6.32.46/drivers/pci/proc.c linux-2.6.32.46/drivers/pci/proc.c
35611--- linux-2.6.32.46/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35612+++ linux-2.6.32.46/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
35613@@ -480,7 +480,16 @@ static const struct file_operations proc
35614 static int __init pci_proc_init(void)
35615 {
35616 struct pci_dev *dev = NULL;
35617+
35618+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35619+#ifdef CONFIG_GRKERNSEC_PROC_USER
35620+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35621+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35622+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35623+#endif
35624+#else
35625 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35626+#endif
35627 proc_create("devices", 0, proc_bus_pci_dir,
35628 &proc_bus_pci_dev_operations);
35629 proc_initialized = 1;
35630diff -urNp linux-2.6.32.46/drivers/pci/slot.c linux-2.6.32.46/drivers/pci/slot.c
35631--- linux-2.6.32.46/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
35632+++ linux-2.6.32.46/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
35633@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
35634 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
35635 }
35636
35637-static struct sysfs_ops pci_slot_sysfs_ops = {
35638+static const struct sysfs_ops pci_slot_sysfs_ops = {
35639 .show = pci_slot_attr_show,
35640 .store = pci_slot_attr_store,
35641 };
35642diff -urNp linux-2.6.32.46/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.46/drivers/pcmcia/pcmcia_ioctl.c
35643--- linux-2.6.32.46/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
35644+++ linux-2.6.32.46/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
35645@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
35646 return -EFAULT;
35647 }
35648 }
35649- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35650+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
35651 if (!buf)
35652 return -ENOMEM;
35653
35654diff -urNp linux-2.6.32.46/drivers/platform/x86/acer-wmi.c linux-2.6.32.46/drivers/platform/x86/acer-wmi.c
35655--- linux-2.6.32.46/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
35656+++ linux-2.6.32.46/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
35657@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
35658 return 0;
35659 }
35660
35661-static struct backlight_ops acer_bl_ops = {
35662+static const struct backlight_ops acer_bl_ops = {
35663 .get_brightness = read_brightness,
35664 .update_status = update_bl_status,
35665 };
35666diff -urNp linux-2.6.32.46/drivers/platform/x86/asus_acpi.c linux-2.6.32.46/drivers/platform/x86/asus_acpi.c
35667--- linux-2.6.32.46/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
35668+++ linux-2.6.32.46/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
35669@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
35670 return 0;
35671 }
35672
35673-static struct backlight_ops asus_backlight_data = {
35674+static const struct backlight_ops asus_backlight_data = {
35675 .get_brightness = read_brightness,
35676 .update_status = set_brightness_status,
35677 };
35678diff -urNp linux-2.6.32.46/drivers/platform/x86/asus-laptop.c linux-2.6.32.46/drivers/platform/x86/asus-laptop.c
35679--- linux-2.6.32.46/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
35680+++ linux-2.6.32.46/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
35681@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
35682 */
35683 static int read_brightness(struct backlight_device *bd);
35684 static int update_bl_status(struct backlight_device *bd);
35685-static struct backlight_ops asusbl_ops = {
35686+static const struct backlight_ops asusbl_ops = {
35687 .get_brightness = read_brightness,
35688 .update_status = update_bl_status,
35689 };
35690diff -urNp linux-2.6.32.46/drivers/platform/x86/compal-laptop.c linux-2.6.32.46/drivers/platform/x86/compal-laptop.c
35691--- linux-2.6.32.46/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
35692+++ linux-2.6.32.46/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
35693@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
35694 return set_lcd_level(b->props.brightness);
35695 }
35696
35697-static struct backlight_ops compalbl_ops = {
35698+static const struct backlight_ops compalbl_ops = {
35699 .get_brightness = bl_get_brightness,
35700 .update_status = bl_update_status,
35701 };
35702diff -urNp linux-2.6.32.46/drivers/platform/x86/dell-laptop.c linux-2.6.32.46/drivers/platform/x86/dell-laptop.c
35703--- linux-2.6.32.46/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
35704+++ linux-2.6.32.46/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
35705@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
35706 return buffer.output[1];
35707 }
35708
35709-static struct backlight_ops dell_ops = {
35710+static const struct backlight_ops dell_ops = {
35711 .get_brightness = dell_get_intensity,
35712 .update_status = dell_send_intensity,
35713 };
35714diff -urNp linux-2.6.32.46/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.46/drivers/platform/x86/eeepc-laptop.c
35715--- linux-2.6.32.46/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
35716+++ linux-2.6.32.46/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
35717@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
35718 */
35719 static int read_brightness(struct backlight_device *bd);
35720 static int update_bl_status(struct backlight_device *bd);
35721-static struct backlight_ops eeepcbl_ops = {
35722+static const struct backlight_ops eeepcbl_ops = {
35723 .get_brightness = read_brightness,
35724 .update_status = update_bl_status,
35725 };
35726diff -urNp linux-2.6.32.46/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.46/drivers/platform/x86/fujitsu-laptop.c
35727--- linux-2.6.32.46/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
35728+++ linux-2.6.32.46/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
35729@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
35730 return ret;
35731 }
35732
35733-static struct backlight_ops fujitsubl_ops = {
35734+static const struct backlight_ops fujitsubl_ops = {
35735 .get_brightness = bl_get_brightness,
35736 .update_status = bl_update_status,
35737 };
35738diff -urNp linux-2.6.32.46/drivers/platform/x86/msi-laptop.c linux-2.6.32.46/drivers/platform/x86/msi-laptop.c
35739--- linux-2.6.32.46/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
35740+++ linux-2.6.32.46/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
35741@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
35742 return set_lcd_level(b->props.brightness);
35743 }
35744
35745-static struct backlight_ops msibl_ops = {
35746+static const struct backlight_ops msibl_ops = {
35747 .get_brightness = bl_get_brightness,
35748 .update_status = bl_update_status,
35749 };
35750diff -urNp linux-2.6.32.46/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.46/drivers/platform/x86/panasonic-laptop.c
35751--- linux-2.6.32.46/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
35752+++ linux-2.6.32.46/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
35753@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
35754 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
35755 }
35756
35757-static struct backlight_ops pcc_backlight_ops = {
35758+static const struct backlight_ops pcc_backlight_ops = {
35759 .get_brightness = bl_get,
35760 .update_status = bl_set_status,
35761 };
35762diff -urNp linux-2.6.32.46/drivers/platform/x86/sony-laptop.c linux-2.6.32.46/drivers/platform/x86/sony-laptop.c
35763--- linux-2.6.32.46/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
35764+++ linux-2.6.32.46/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
35765@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
35766 }
35767
35768 static struct backlight_device *sony_backlight_device;
35769-static struct backlight_ops sony_backlight_ops = {
35770+static const struct backlight_ops sony_backlight_ops = {
35771 .update_status = sony_backlight_update_status,
35772 .get_brightness = sony_backlight_get_brightness,
35773 };
35774diff -urNp linux-2.6.32.46/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.46/drivers/platform/x86/thinkpad_acpi.c
35775--- linux-2.6.32.46/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
35776+++ linux-2.6.32.46/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
35777@@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
35778 return 0;
35779 }
35780
35781-void static hotkey_mask_warn_incomplete_mask(void)
35782+static void hotkey_mask_warn_incomplete_mask(void)
35783 {
35784 /* log only what the user can fix... */
35785 const u32 wantedmask = hotkey_driver_mask &
35786@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
35787 BACKLIGHT_UPDATE_HOTKEY);
35788 }
35789
35790-static struct backlight_ops ibm_backlight_data = {
35791+static const struct backlight_ops ibm_backlight_data = {
35792 .get_brightness = brightness_get,
35793 .update_status = brightness_update_status,
35794 };
35795diff -urNp linux-2.6.32.46/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.46/drivers/platform/x86/toshiba_acpi.c
35796--- linux-2.6.32.46/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
35797+++ linux-2.6.32.46/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
35798@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
35799 return AE_OK;
35800 }
35801
35802-static struct backlight_ops toshiba_backlight_data = {
35803+static const struct backlight_ops toshiba_backlight_data = {
35804 .get_brightness = get_lcd,
35805 .update_status = set_lcd_status,
35806 };
35807diff -urNp linux-2.6.32.46/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.46/drivers/pnp/pnpbios/bioscalls.c
35808--- linux-2.6.32.46/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
35809+++ linux-2.6.32.46/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
35810@@ -60,7 +60,7 @@ do { \
35811 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35812 } while(0)
35813
35814-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35815+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35816 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35817
35818 /*
35819@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
35820
35821 cpu = get_cpu();
35822 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35823+
35824+ pax_open_kernel();
35825 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35826+ pax_close_kernel();
35827
35828 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35829 spin_lock_irqsave(&pnp_bios_lock, flags);
35830@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
35831 :"memory");
35832 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35833
35834+ pax_open_kernel();
35835 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35836+ pax_close_kernel();
35837+
35838 put_cpu();
35839
35840 /* If we get here and this is set then the PnP BIOS faulted on us. */
35841@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
35842 return status;
35843 }
35844
35845-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35846+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35847 {
35848 int i;
35849
35850@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
35851 pnp_bios_callpoint.offset = header->fields.pm16offset;
35852 pnp_bios_callpoint.segment = PNP_CS16;
35853
35854+ pax_open_kernel();
35855+
35856 for_each_possible_cpu(i) {
35857 struct desc_struct *gdt = get_cpu_gdt_table(i);
35858 if (!gdt)
35859@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
35860 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35861 (unsigned long)__va(header->fields.pm16dseg));
35862 }
35863+
35864+ pax_close_kernel();
35865 }
35866diff -urNp linux-2.6.32.46/drivers/pnp/resource.c linux-2.6.32.46/drivers/pnp/resource.c
35867--- linux-2.6.32.46/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
35868+++ linux-2.6.32.46/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
35869@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
35870 return 1;
35871
35872 /* check if the resource is valid */
35873- if (*irq < 0 || *irq > 15)
35874+ if (*irq > 15)
35875 return 0;
35876
35877 /* check if the resource is reserved */
35878@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
35879 return 1;
35880
35881 /* check if the resource is valid */
35882- if (*dma < 0 || *dma == 4 || *dma > 7)
35883+ if (*dma == 4 || *dma > 7)
35884 return 0;
35885
35886 /* check if the resource is reserved */
35887diff -urNp linux-2.6.32.46/drivers/power/bq27x00_battery.c linux-2.6.32.46/drivers/power/bq27x00_battery.c
35888--- linux-2.6.32.46/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
35889+++ linux-2.6.32.46/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
35890@@ -44,7 +44,7 @@ struct bq27x00_device_info;
35891 struct bq27x00_access_methods {
35892 int (*read)(u8 reg, int *rt_value, int b_single,
35893 struct bq27x00_device_info *di);
35894-};
35895+} __no_const;
35896
35897 struct bq27x00_device_info {
35898 struct device *dev;
35899diff -urNp linux-2.6.32.46/drivers/rtc/rtc-dev.c linux-2.6.32.46/drivers/rtc/rtc-dev.c
35900--- linux-2.6.32.46/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
35901+++ linux-2.6.32.46/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
35902@@ -14,6 +14,7 @@
35903 #include <linux/module.h>
35904 #include <linux/rtc.h>
35905 #include <linux/sched.h>
35906+#include <linux/grsecurity.h>
35907 #include "rtc-core.h"
35908
35909 static dev_t rtc_devt;
35910@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
35911 if (copy_from_user(&tm, uarg, sizeof(tm)))
35912 return -EFAULT;
35913
35914+ gr_log_timechange();
35915+
35916 return rtc_set_time(rtc, &tm);
35917
35918 case RTC_PIE_ON:
35919diff -urNp linux-2.6.32.46/drivers/s390/cio/qdio_perf.c linux-2.6.32.46/drivers/s390/cio/qdio_perf.c
35920--- linux-2.6.32.46/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
35921+++ linux-2.6.32.46/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
35922@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
35923 static int qdio_perf_proc_show(struct seq_file *m, void *v)
35924 {
35925 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
35926- (long)atomic_long_read(&perf_stats.qdio_int));
35927+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
35928 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
35929- (long)atomic_long_read(&perf_stats.pci_int));
35930+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
35931 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
35932- (long)atomic_long_read(&perf_stats.thin_int));
35933+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
35934 seq_printf(m, "\n");
35935 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
35936- (long)atomic_long_read(&perf_stats.tasklet_inbound));
35937+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
35938 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
35939- (long)atomic_long_read(&perf_stats.tasklet_outbound));
35940+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
35941 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
35942- (long)atomic_long_read(&perf_stats.tasklet_thinint),
35943- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
35944+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
35945+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
35946 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
35947- (long)atomic_long_read(&perf_stats.thinint_inbound),
35948- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
35949+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
35950+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
35951 seq_printf(m, "\n");
35952 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
35953- (long)atomic_long_read(&perf_stats.siga_in));
35954+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
35955 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
35956- (long)atomic_long_read(&perf_stats.siga_out));
35957+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
35958 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
35959- (long)atomic_long_read(&perf_stats.siga_sync));
35960+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
35961 seq_printf(m, "\n");
35962 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
35963- (long)atomic_long_read(&perf_stats.inbound_handler));
35964+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
35965 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
35966- (long)atomic_long_read(&perf_stats.outbound_handler));
35967+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
35968 seq_printf(m, "\n");
35969 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
35970- (long)atomic_long_read(&perf_stats.fast_requeue));
35971+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
35972 seq_printf(m, "Number of outbound target full condition\t: %li\n",
35973- (long)atomic_long_read(&perf_stats.outbound_target_full));
35974+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
35975 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
35976- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
35977+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
35978 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
35979- (long)atomic_long_read(&perf_stats.debug_stop_polling));
35980+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
35981 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
35982- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
35983+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
35984 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
35985- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
35986- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
35987+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
35988+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
35989 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
35990- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
35991- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
35992+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
35993+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
35994 seq_printf(m, "\n");
35995 return 0;
35996 }
35997diff -urNp linux-2.6.32.46/drivers/s390/cio/qdio_perf.h linux-2.6.32.46/drivers/s390/cio/qdio_perf.h
35998--- linux-2.6.32.46/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
35999+++ linux-2.6.32.46/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36000@@ -13,46 +13,46 @@
36001
36002 struct qdio_perf_stats {
36003 /* interrupt handler calls */
36004- atomic_long_t qdio_int;
36005- atomic_long_t pci_int;
36006- atomic_long_t thin_int;
36007+ atomic_long_unchecked_t qdio_int;
36008+ atomic_long_unchecked_t pci_int;
36009+ atomic_long_unchecked_t thin_int;
36010
36011 /* tasklet runs */
36012- atomic_long_t tasklet_inbound;
36013- atomic_long_t tasklet_outbound;
36014- atomic_long_t tasklet_thinint;
36015- atomic_long_t tasklet_thinint_loop;
36016- atomic_long_t thinint_inbound;
36017- atomic_long_t thinint_inbound_loop;
36018- atomic_long_t thinint_inbound_loop2;
36019+ atomic_long_unchecked_t tasklet_inbound;
36020+ atomic_long_unchecked_t tasklet_outbound;
36021+ atomic_long_unchecked_t tasklet_thinint;
36022+ atomic_long_unchecked_t tasklet_thinint_loop;
36023+ atomic_long_unchecked_t thinint_inbound;
36024+ atomic_long_unchecked_t thinint_inbound_loop;
36025+ atomic_long_unchecked_t thinint_inbound_loop2;
36026
36027 /* signal adapter calls */
36028- atomic_long_t siga_out;
36029- atomic_long_t siga_in;
36030- atomic_long_t siga_sync;
36031+ atomic_long_unchecked_t siga_out;
36032+ atomic_long_unchecked_t siga_in;
36033+ atomic_long_unchecked_t siga_sync;
36034
36035 /* misc */
36036- atomic_long_t inbound_handler;
36037- atomic_long_t outbound_handler;
36038- atomic_long_t fast_requeue;
36039- atomic_long_t outbound_target_full;
36040+ atomic_long_unchecked_t inbound_handler;
36041+ atomic_long_unchecked_t outbound_handler;
36042+ atomic_long_unchecked_t fast_requeue;
36043+ atomic_long_unchecked_t outbound_target_full;
36044
36045 /* for debugging */
36046- atomic_long_t debug_tl_out_timer;
36047- atomic_long_t debug_stop_polling;
36048- atomic_long_t debug_eqbs_all;
36049- atomic_long_t debug_eqbs_incomplete;
36050- atomic_long_t debug_sqbs_all;
36051- atomic_long_t debug_sqbs_incomplete;
36052+ atomic_long_unchecked_t debug_tl_out_timer;
36053+ atomic_long_unchecked_t debug_stop_polling;
36054+ atomic_long_unchecked_t debug_eqbs_all;
36055+ atomic_long_unchecked_t debug_eqbs_incomplete;
36056+ atomic_long_unchecked_t debug_sqbs_all;
36057+ atomic_long_unchecked_t debug_sqbs_incomplete;
36058 };
36059
36060 extern struct qdio_perf_stats perf_stats;
36061 extern int qdio_performance_stats;
36062
36063-static inline void qdio_perf_stat_inc(atomic_long_t *count)
36064+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36065 {
36066 if (qdio_performance_stats)
36067- atomic_long_inc(count);
36068+ atomic_long_inc_unchecked(count);
36069 }
36070
36071 int qdio_setup_perf_stats(void);
36072diff -urNp linux-2.6.32.46/drivers/scsi/aacraid/aacraid.h linux-2.6.32.46/drivers/scsi/aacraid/aacraid.h
36073--- linux-2.6.32.46/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36074+++ linux-2.6.32.46/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36075@@ -471,7 +471,7 @@ struct adapter_ops
36076 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36077 /* Administrative operations */
36078 int (*adapter_comm)(struct aac_dev * dev, int comm);
36079-};
36080+} __no_const;
36081
36082 /*
36083 * Define which interrupt handler needs to be installed
36084diff -urNp linux-2.6.32.46/drivers/scsi/aacraid/commctrl.c linux-2.6.32.46/drivers/scsi/aacraid/commctrl.c
36085--- linux-2.6.32.46/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36086+++ linux-2.6.32.46/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36087@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36088 u32 actual_fibsize64, actual_fibsize = 0;
36089 int i;
36090
36091+ pax_track_stack();
36092
36093 if (dev->in_reset) {
36094 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
36095diff -urNp linux-2.6.32.46/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.46/drivers/scsi/aic94xx/aic94xx_init.c
36096--- linux-2.6.32.46/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36097+++ linux-2.6.32.46/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36098@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36099 flash_error_table[i].reason);
36100 }
36101
36102-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36103+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36104 asd_show_update_bios, asd_store_update_bios);
36105
36106 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36107diff -urNp linux-2.6.32.46/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.46/drivers/scsi/bfa/bfa_iocfc.h
36108--- linux-2.6.32.46/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36109+++ linux-2.6.32.46/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36110@@ -61,7 +61,7 @@ struct bfa_hwif_s {
36111 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36112 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36113 u32 *nvecs, u32 *maxvec);
36114-};
36115+} __no_const;
36116 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36117
36118 struct bfa_iocfc_s {
36119diff -urNp linux-2.6.32.46/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.46/drivers/scsi/bfa/bfa_ioc.h
36120--- linux-2.6.32.46/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36121+++ linux-2.6.32.46/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36122@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36123 bfa_ioc_disable_cbfn_t disable_cbfn;
36124 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36125 bfa_ioc_reset_cbfn_t reset_cbfn;
36126-};
36127+} __no_const;
36128
36129 /**
36130 * Heartbeat failure notification queue element.
36131diff -urNp linux-2.6.32.46/drivers/scsi/BusLogic.c linux-2.6.32.46/drivers/scsi/BusLogic.c
36132--- linux-2.6.32.46/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36133+++ linux-2.6.32.46/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36134@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36135 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36136 *PrototypeHostAdapter)
36137 {
36138+ pax_track_stack();
36139+
36140 /*
36141 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36142 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36143diff -urNp linux-2.6.32.46/drivers/scsi/dpt_i2o.c linux-2.6.32.46/drivers/scsi/dpt_i2o.c
36144--- linux-2.6.32.46/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36145+++ linux-2.6.32.46/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36146@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36147 dma_addr_t addr;
36148 ulong flags = 0;
36149
36150+ pax_track_stack();
36151+
36152 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36153 // get user msg size in u32s
36154 if(get_user(size, &user_msg[0])){
36155@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36156 s32 rcode;
36157 dma_addr_t addr;
36158
36159+ pax_track_stack();
36160+
36161 memset(msg, 0 , sizeof(msg));
36162 len = scsi_bufflen(cmd);
36163 direction = 0x00000000;
36164diff -urNp linux-2.6.32.46/drivers/scsi/eata.c linux-2.6.32.46/drivers/scsi/eata.c
36165--- linux-2.6.32.46/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36166+++ linux-2.6.32.46/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36167@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36168 struct hostdata *ha;
36169 char name[16];
36170
36171+ pax_track_stack();
36172+
36173 sprintf(name, "%s%d", driver_name, j);
36174
36175 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36176diff -urNp linux-2.6.32.46/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.46/drivers/scsi/fcoe/libfcoe.c
36177--- linux-2.6.32.46/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36178+++ linux-2.6.32.46/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36179@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36180 size_t rlen;
36181 size_t dlen;
36182
36183+ pax_track_stack();
36184+
36185 fiph = (struct fip_header *)skb->data;
36186 sub = fiph->fip_subcode;
36187 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36188diff -urNp linux-2.6.32.46/drivers/scsi/fnic/fnic_main.c linux-2.6.32.46/drivers/scsi/fnic/fnic_main.c
36189--- linux-2.6.32.46/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36190+++ linux-2.6.32.46/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36191@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36192 /* Start local port initiatialization */
36193
36194 lp->link_up = 0;
36195- lp->tt = fnic_transport_template;
36196+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36197
36198 lp->max_retry_count = fnic->config.flogi_retries;
36199 lp->max_rport_retry_count = fnic->config.plogi_retries;
36200diff -urNp linux-2.6.32.46/drivers/scsi/gdth.c linux-2.6.32.46/drivers/scsi/gdth.c
36201--- linux-2.6.32.46/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36202+++ linux-2.6.32.46/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36203@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36204 ulong flags;
36205 gdth_ha_str *ha;
36206
36207+ pax_track_stack();
36208+
36209 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36210 return -EFAULT;
36211 ha = gdth_find_ha(ldrv.ionode);
36212@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36213 gdth_ha_str *ha;
36214 int rval;
36215
36216+ pax_track_stack();
36217+
36218 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36219 res.number >= MAX_HDRIVES)
36220 return -EFAULT;
36221@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36222 gdth_ha_str *ha;
36223 int rval;
36224
36225+ pax_track_stack();
36226+
36227 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36228 return -EFAULT;
36229 ha = gdth_find_ha(gen.ionode);
36230@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36231 int i;
36232 gdth_cmd_str gdtcmd;
36233 char cmnd[MAX_COMMAND_SIZE];
36234+
36235+ pax_track_stack();
36236+
36237 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36238
36239 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36240diff -urNp linux-2.6.32.46/drivers/scsi/gdth_proc.c linux-2.6.32.46/drivers/scsi/gdth_proc.c
36241--- linux-2.6.32.46/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36242+++ linux-2.6.32.46/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36243@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36244 ulong64 paddr;
36245
36246 char cmnd[MAX_COMMAND_SIZE];
36247+
36248+ pax_track_stack();
36249+
36250 memset(cmnd, 0xff, 12);
36251 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36252
36253@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36254 gdth_hget_str *phg;
36255 char cmnd[MAX_COMMAND_SIZE];
36256
36257+ pax_track_stack();
36258+
36259 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36260 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36261 if (!gdtcmd || !estr)
36262diff -urNp linux-2.6.32.46/drivers/scsi/hosts.c linux-2.6.32.46/drivers/scsi/hosts.c
36263--- linux-2.6.32.46/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36264+++ linux-2.6.32.46/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36265@@ -40,7 +40,7 @@
36266 #include "scsi_logging.h"
36267
36268
36269-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36270+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36271
36272
36273 static void scsi_host_cls_release(struct device *dev)
36274@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36275 * subtract one because we increment first then return, but we need to
36276 * know what the next host number was before increment
36277 */
36278- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36279+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36280 shost->dma_channel = 0xff;
36281
36282 /* These three are default values which can be overridden */
36283diff -urNp linux-2.6.32.46/drivers/scsi/ipr.c linux-2.6.32.46/drivers/scsi/ipr.c
36284--- linux-2.6.32.46/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36285+++ linux-2.6.32.46/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36286@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36287 return true;
36288 }
36289
36290-static struct ata_port_operations ipr_sata_ops = {
36291+static const struct ata_port_operations ipr_sata_ops = {
36292 .phy_reset = ipr_ata_phy_reset,
36293 .hardreset = ipr_sata_reset,
36294 .post_internal_cmd = ipr_ata_post_internal,
36295diff -urNp linux-2.6.32.46/drivers/scsi/ips.h linux-2.6.32.46/drivers/scsi/ips.h
36296--- linux-2.6.32.46/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36297+++ linux-2.6.32.46/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36298@@ -1027,7 +1027,7 @@ typedef struct {
36299 int (*intr)(struct ips_ha *);
36300 void (*enableint)(struct ips_ha *);
36301 uint32_t (*statupd)(struct ips_ha *);
36302-} ips_hw_func_t;
36303+} __no_const ips_hw_func_t;
36304
36305 typedef struct ips_ha {
36306 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36307diff -urNp linux-2.6.32.46/drivers/scsi/libfc/fc_exch.c linux-2.6.32.46/drivers/scsi/libfc/fc_exch.c
36308--- linux-2.6.32.46/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36309+++ linux-2.6.32.46/drivers/scsi/libfc/fc_exch.c 2011-08-23 21:22:32.000000000 -0400
36310@@ -86,12 +86,12 @@ struct fc_exch_mgr {
36311 * all together if not used XXX
36312 */
36313 struct {
36314- atomic_t no_free_exch;
36315- atomic_t no_free_exch_xid;
36316- atomic_t xid_not_found;
36317- atomic_t xid_busy;
36318- atomic_t seq_not_found;
36319- atomic_t non_bls_resp;
36320+ atomic_unchecked_t no_free_exch;
36321+ atomic_unchecked_t no_free_exch_xid;
36322+ atomic_unchecked_t xid_not_found;
36323+ atomic_unchecked_t xid_busy;
36324+ atomic_unchecked_t seq_not_found;
36325+ atomic_unchecked_t non_bls_resp;
36326 } stats;
36327 };
36328 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36329@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36330 /* allocate memory for exchange */
36331 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36332 if (!ep) {
36333- atomic_inc(&mp->stats.no_free_exch);
36334+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36335 goto out;
36336 }
36337 memset(ep, 0, sizeof(*ep));
36338@@ -557,7 +557,7 @@ out:
36339 return ep;
36340 err:
36341 spin_unlock_bh(&pool->lock);
36342- atomic_inc(&mp->stats.no_free_exch_xid);
36343+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36344 mempool_free(ep, mp->ep_pool);
36345 return NULL;
36346 }
36347@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36348 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36349 ep = fc_exch_find(mp, xid);
36350 if (!ep) {
36351- atomic_inc(&mp->stats.xid_not_found);
36352+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36353 reject = FC_RJT_OX_ID;
36354 goto out;
36355 }
36356@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36357 ep = fc_exch_find(mp, xid);
36358 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36359 if (ep) {
36360- atomic_inc(&mp->stats.xid_busy);
36361+ atomic_inc_unchecked(&mp->stats.xid_busy);
36362 reject = FC_RJT_RX_ID;
36363 goto rel;
36364 }
36365@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36366 }
36367 xid = ep->xid; /* get our XID */
36368 } else if (!ep) {
36369- atomic_inc(&mp->stats.xid_not_found);
36370+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36371 reject = FC_RJT_RX_ID; /* XID not found */
36372 goto out;
36373 }
36374@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36375 } else {
36376 sp = &ep->seq;
36377 if (sp->id != fh->fh_seq_id) {
36378- atomic_inc(&mp->stats.seq_not_found);
36379+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36380 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36381 goto rel;
36382 }
36383@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36384
36385 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36386 if (!ep) {
36387- atomic_inc(&mp->stats.xid_not_found);
36388+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36389 goto out;
36390 }
36391 if (ep->esb_stat & ESB_ST_COMPLETE) {
36392- atomic_inc(&mp->stats.xid_not_found);
36393+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36394 goto out;
36395 }
36396 if (ep->rxid == FC_XID_UNKNOWN)
36397 ep->rxid = ntohs(fh->fh_rx_id);
36398 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36399- atomic_inc(&mp->stats.xid_not_found);
36400+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36401 goto rel;
36402 }
36403 if (ep->did != ntoh24(fh->fh_s_id) &&
36404 ep->did != FC_FID_FLOGI) {
36405- atomic_inc(&mp->stats.xid_not_found);
36406+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36407 goto rel;
36408 }
36409 sof = fr_sof(fp);
36410@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36411 } else {
36412 sp = &ep->seq;
36413 if (sp->id != fh->fh_seq_id) {
36414- atomic_inc(&mp->stats.seq_not_found);
36415+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36416 goto rel;
36417 }
36418 }
36419@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36420 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36421
36422 if (!sp)
36423- atomic_inc(&mp->stats.xid_not_found);
36424+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36425 else
36426- atomic_inc(&mp->stats.non_bls_resp);
36427+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36428
36429 fc_frame_free(fp);
36430 }
36431diff -urNp linux-2.6.32.46/drivers/scsi/libsas/sas_ata.c linux-2.6.32.46/drivers/scsi/libsas/sas_ata.c
36432--- linux-2.6.32.46/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36433+++ linux-2.6.32.46/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36434@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36435 }
36436 }
36437
36438-static struct ata_port_operations sas_sata_ops = {
36439+static const struct ata_port_operations sas_sata_ops = {
36440 .phy_reset = sas_ata_phy_reset,
36441 .post_internal_cmd = sas_ata_post_internal,
36442 .qc_defer = ata_std_qc_defer,
36443diff -urNp linux-2.6.32.46/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.46/drivers/scsi/lpfc/lpfc_debugfs.c
36444--- linux-2.6.32.46/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36445+++ linux-2.6.32.46/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36446@@ -124,7 +124,7 @@ struct lpfc_debug {
36447 int len;
36448 };
36449
36450-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36451+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36452 static unsigned long lpfc_debugfs_start_time = 0L;
36453
36454 /**
36455@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36456 lpfc_debugfs_enable = 0;
36457
36458 len = 0;
36459- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36460+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36461 (lpfc_debugfs_max_disc_trc - 1);
36462 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36463 dtp = vport->disc_trc + i;
36464@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
36465 lpfc_debugfs_enable = 0;
36466
36467 len = 0;
36468- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36469+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36470 (lpfc_debugfs_max_slow_ring_trc - 1);
36471 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36472 dtp = phba->slow_ring_trc + i;
36473@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
36474 uint32_t *ptr;
36475 char buffer[1024];
36476
36477+ pax_track_stack();
36478+
36479 off = 0;
36480 spin_lock_irq(&phba->hbalock);
36481
36482@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
36483 !vport || !vport->disc_trc)
36484 return;
36485
36486- index = atomic_inc_return(&vport->disc_trc_cnt) &
36487+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36488 (lpfc_debugfs_max_disc_trc - 1);
36489 dtp = vport->disc_trc + index;
36490 dtp->fmt = fmt;
36491 dtp->data1 = data1;
36492 dtp->data2 = data2;
36493 dtp->data3 = data3;
36494- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36495+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36496 dtp->jif = jiffies;
36497 #endif
36498 return;
36499@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
36500 !phba || !phba->slow_ring_trc)
36501 return;
36502
36503- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36504+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36505 (lpfc_debugfs_max_slow_ring_trc - 1);
36506 dtp = phba->slow_ring_trc + index;
36507 dtp->fmt = fmt;
36508 dtp->data1 = data1;
36509 dtp->data2 = data2;
36510 dtp->data3 = data3;
36511- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36512+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36513 dtp->jif = jiffies;
36514 #endif
36515 return;
36516@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36517 "slow_ring buffer\n");
36518 goto debug_failed;
36519 }
36520- atomic_set(&phba->slow_ring_trc_cnt, 0);
36521+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36522 memset(phba->slow_ring_trc, 0,
36523 (sizeof(struct lpfc_debugfs_trc) *
36524 lpfc_debugfs_max_slow_ring_trc));
36525@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
36526 "buffer\n");
36527 goto debug_failed;
36528 }
36529- atomic_set(&vport->disc_trc_cnt, 0);
36530+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36531
36532 snprintf(name, sizeof(name), "discovery_trace");
36533 vport->debug_disc_trc =
36534diff -urNp linux-2.6.32.46/drivers/scsi/lpfc/lpfc.h linux-2.6.32.46/drivers/scsi/lpfc/lpfc.h
36535--- linux-2.6.32.46/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
36536+++ linux-2.6.32.46/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
36537@@ -400,7 +400,7 @@ struct lpfc_vport {
36538 struct dentry *debug_nodelist;
36539 struct dentry *vport_debugfs_root;
36540 struct lpfc_debugfs_trc *disc_trc;
36541- atomic_t disc_trc_cnt;
36542+ atomic_unchecked_t disc_trc_cnt;
36543 #endif
36544 uint8_t stat_data_enabled;
36545 uint8_t stat_data_blocked;
36546@@ -725,8 +725,8 @@ struct lpfc_hba {
36547 struct timer_list fabric_block_timer;
36548 unsigned long bit_flags;
36549 #define FABRIC_COMANDS_BLOCKED 0
36550- atomic_t num_rsrc_err;
36551- atomic_t num_cmd_success;
36552+ atomic_unchecked_t num_rsrc_err;
36553+ atomic_unchecked_t num_cmd_success;
36554 unsigned long last_rsrc_error_time;
36555 unsigned long last_ramp_down_time;
36556 unsigned long last_ramp_up_time;
36557@@ -740,7 +740,7 @@ struct lpfc_hba {
36558 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36559 struct dentry *debug_slow_ring_trc;
36560 struct lpfc_debugfs_trc *slow_ring_trc;
36561- atomic_t slow_ring_trc_cnt;
36562+ atomic_unchecked_t slow_ring_trc_cnt;
36563 #endif
36564
36565 /* Used for deferred freeing of ELS data buffers */
36566diff -urNp linux-2.6.32.46/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.46/drivers/scsi/lpfc/lpfc_init.c
36567--- linux-2.6.32.46/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
36568+++ linux-2.6.32.46/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
36569@@ -8021,8 +8021,10 @@ lpfc_init(void)
36570 printk(LPFC_COPYRIGHT "\n");
36571
36572 if (lpfc_enable_npiv) {
36573- lpfc_transport_functions.vport_create = lpfc_vport_create;
36574- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36575+ pax_open_kernel();
36576+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36577+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36578+ pax_close_kernel();
36579 }
36580 lpfc_transport_template =
36581 fc_attach_transport(&lpfc_transport_functions);
36582diff -urNp linux-2.6.32.46/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.46/drivers/scsi/lpfc/lpfc_scsi.c
36583--- linux-2.6.32.46/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
36584+++ linux-2.6.32.46/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
36585@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
36586 uint32_t evt_posted;
36587
36588 spin_lock_irqsave(&phba->hbalock, flags);
36589- atomic_inc(&phba->num_rsrc_err);
36590+ atomic_inc_unchecked(&phba->num_rsrc_err);
36591 phba->last_rsrc_error_time = jiffies;
36592
36593 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36594@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
36595 unsigned long flags;
36596 struct lpfc_hba *phba = vport->phba;
36597 uint32_t evt_posted;
36598- atomic_inc(&phba->num_cmd_success);
36599+ atomic_inc_unchecked(&phba->num_cmd_success);
36600
36601 if (vport->cfg_lun_queue_depth <= queue_depth)
36602 return;
36603@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36604 int i;
36605 struct lpfc_rport_data *rdata;
36606
36607- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36608- num_cmd_success = atomic_read(&phba->num_cmd_success);
36609+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36610+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36611
36612 vports = lpfc_create_vport_work_array(phba);
36613 if (vports != NULL)
36614@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
36615 }
36616 }
36617 lpfc_destroy_vport_work_array(phba, vports);
36618- atomic_set(&phba->num_rsrc_err, 0);
36619- atomic_set(&phba->num_cmd_success, 0);
36620+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36621+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36622 }
36623
36624 /**
36625@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
36626 }
36627 }
36628 lpfc_destroy_vport_work_array(phba, vports);
36629- atomic_set(&phba->num_rsrc_err, 0);
36630- atomic_set(&phba->num_cmd_success, 0);
36631+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36632+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36633 }
36634
36635 /**
36636diff -urNp linux-2.6.32.46/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.46/drivers/scsi/megaraid/megaraid_mbox.c
36637--- linux-2.6.32.46/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
36638+++ linux-2.6.32.46/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
36639@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
36640 int rval;
36641 int i;
36642
36643+ pax_track_stack();
36644+
36645 // Allocate memory for the base list of scb for management module.
36646 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36647
36648diff -urNp linux-2.6.32.46/drivers/scsi/osd/osd_initiator.c linux-2.6.32.46/drivers/scsi/osd/osd_initiator.c
36649--- linux-2.6.32.46/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
36650+++ linux-2.6.32.46/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
36651@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
36652 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36653 int ret;
36654
36655+ pax_track_stack();
36656+
36657 or = osd_start_request(od, GFP_KERNEL);
36658 if (!or)
36659 return -ENOMEM;
36660diff -urNp linux-2.6.32.46/drivers/scsi/pmcraid.c linux-2.6.32.46/drivers/scsi/pmcraid.c
36661--- linux-2.6.32.46/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
36662+++ linux-2.6.32.46/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
36663@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
36664 res->scsi_dev = scsi_dev;
36665 scsi_dev->hostdata = res;
36666 res->change_detected = 0;
36667- atomic_set(&res->read_failures, 0);
36668- atomic_set(&res->write_failures, 0);
36669+ atomic_set_unchecked(&res->read_failures, 0);
36670+ atomic_set_unchecked(&res->write_failures, 0);
36671 rc = 0;
36672 }
36673 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36674@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
36675
36676 /* If this was a SCSI read/write command keep count of errors */
36677 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36678- atomic_inc(&res->read_failures);
36679+ atomic_inc_unchecked(&res->read_failures);
36680 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36681- atomic_inc(&res->write_failures);
36682+ atomic_inc_unchecked(&res->write_failures);
36683
36684 if (!RES_IS_GSCSI(res->cfg_entry) &&
36685 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36686@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
36687
36688 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36689 /* add resources only after host is added into system */
36690- if (!atomic_read(&pinstance->expose_resources))
36691+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36692 return;
36693
36694 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
36695@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
36696 init_waitqueue_head(&pinstance->reset_wait_q);
36697
36698 atomic_set(&pinstance->outstanding_cmds, 0);
36699- atomic_set(&pinstance->expose_resources, 0);
36700+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36701
36702 INIT_LIST_HEAD(&pinstance->free_res_q);
36703 INIT_LIST_HEAD(&pinstance->used_res_q);
36704@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
36705 /* Schedule worker thread to handle CCN and take care of adding and
36706 * removing devices to OS
36707 */
36708- atomic_set(&pinstance->expose_resources, 1);
36709+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36710 schedule_work(&pinstance->worker_q);
36711 return rc;
36712
36713diff -urNp linux-2.6.32.46/drivers/scsi/pmcraid.h linux-2.6.32.46/drivers/scsi/pmcraid.h
36714--- linux-2.6.32.46/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
36715+++ linux-2.6.32.46/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
36716@@ -690,7 +690,7 @@ struct pmcraid_instance {
36717 atomic_t outstanding_cmds;
36718
36719 /* should add/delete resources to mid-layer now ?*/
36720- atomic_t expose_resources;
36721+ atomic_unchecked_t expose_resources;
36722
36723 /* Tasklet to handle deferred processing */
36724 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
36725@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
36726 struct list_head queue; /* link to "to be exposed" resources */
36727 struct pmcraid_config_table_entry cfg_entry;
36728 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36729- atomic_t read_failures; /* count of failed READ commands */
36730- atomic_t write_failures; /* count of failed WRITE commands */
36731+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36732+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36733
36734 /* To indicate add/delete/modify during CCN */
36735 u8 change_detected;
36736diff -urNp linux-2.6.32.46/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.46/drivers/scsi/qla2xxx/qla_def.h
36737--- linux-2.6.32.46/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
36738+++ linux-2.6.32.46/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
36739@@ -2089,7 +2089,7 @@ struct isp_operations {
36740
36741 int (*get_flash_version) (struct scsi_qla_host *, void *);
36742 int (*start_scsi) (srb_t *);
36743-};
36744+} __no_const;
36745
36746 /* MSI-X Support *************************************************************/
36747
36748diff -urNp linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_def.h
36749--- linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
36750+++ linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
36751@@ -240,7 +240,7 @@ struct ddb_entry {
36752 atomic_t retry_relogin_timer; /* Min Time between relogins
36753 * (4000 only) */
36754 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36755- atomic_t relogin_retry_count; /* Num of times relogin has been
36756+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36757 * retried */
36758
36759 uint16_t port;
36760diff -urNp linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_init.c
36761--- linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
36762+++ linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
36763@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
36764 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
36765 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36766 atomic_set(&ddb_entry->relogin_timer, 0);
36767- atomic_set(&ddb_entry->relogin_retry_count, 0);
36768+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36769 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36770 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36771 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36772@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
36773 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36774 atomic_set(&ddb_entry->port_down_timer,
36775 ha->port_down_retry_count);
36776- atomic_set(&ddb_entry->relogin_retry_count, 0);
36777+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36778 atomic_set(&ddb_entry->relogin_timer, 0);
36779 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36780 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
36781diff -urNp linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_os.c
36782--- linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
36783+++ linux-2.6.32.46/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
36784@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
36785 ddb_entry->fw_ddb_device_state ==
36786 DDB_DS_SESSION_FAILED) {
36787 /* Reset retry relogin timer */
36788- atomic_inc(&ddb_entry->relogin_retry_count);
36789+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36790 DEBUG2(printk("scsi%ld: index[%d] relogin"
36791 " timed out-retrying"
36792 " relogin (%d)\n",
36793 ha->host_no,
36794 ddb_entry->fw_ddb_index,
36795- atomic_read(&ddb_entry->
36796+ atomic_read_unchecked(&ddb_entry->
36797 relogin_retry_count))
36798 );
36799 start_dpc++;
36800diff -urNp linux-2.6.32.46/drivers/scsi/scsi.c linux-2.6.32.46/drivers/scsi/scsi.c
36801--- linux-2.6.32.46/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
36802+++ linux-2.6.32.46/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
36803@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
36804 unsigned long timeout;
36805 int rtn = 0;
36806
36807- atomic_inc(&cmd->device->iorequest_cnt);
36808+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36809
36810 /* check if the device is still usable */
36811 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36812diff -urNp linux-2.6.32.46/drivers/scsi/scsi_debug.c linux-2.6.32.46/drivers/scsi/scsi_debug.c
36813--- linux-2.6.32.46/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
36814+++ linux-2.6.32.46/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
36815@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
36816 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36817 unsigned char *cmd = (unsigned char *)scp->cmnd;
36818
36819+ pax_track_stack();
36820+
36821 if ((errsts = check_readiness(scp, 1, devip)))
36822 return errsts;
36823 memset(arr, 0, sizeof(arr));
36824@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
36825 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36826 unsigned char *cmd = (unsigned char *)scp->cmnd;
36827
36828+ pax_track_stack();
36829+
36830 if ((errsts = check_readiness(scp, 1, devip)))
36831 return errsts;
36832 memset(arr, 0, sizeof(arr));
36833diff -urNp linux-2.6.32.46/drivers/scsi/scsi_lib.c linux-2.6.32.46/drivers/scsi/scsi_lib.c
36834--- linux-2.6.32.46/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
36835+++ linux-2.6.32.46/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
36836@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
36837
36838 scsi_init_cmd_errh(cmd);
36839 cmd->result = DID_NO_CONNECT << 16;
36840- atomic_inc(&cmd->device->iorequest_cnt);
36841+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36842
36843 /*
36844 * SCSI request completion path will do scsi_device_unbusy(),
36845@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
36846 */
36847 cmd->serial_number = 0;
36848
36849- atomic_inc(&cmd->device->iodone_cnt);
36850+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36851 if (cmd->result)
36852- atomic_inc(&cmd->device->ioerr_cnt);
36853+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36854
36855 disposition = scsi_decide_disposition(cmd);
36856 if (disposition != SUCCESS &&
36857diff -urNp linux-2.6.32.46/drivers/scsi/scsi_sysfs.c linux-2.6.32.46/drivers/scsi/scsi_sysfs.c
36858--- linux-2.6.32.46/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
36859+++ linux-2.6.32.46/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
36860@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
36861 char *buf) \
36862 { \
36863 struct scsi_device *sdev = to_scsi_device(dev); \
36864- unsigned long long count = atomic_read(&sdev->field); \
36865+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36866 return snprintf(buf, 20, "0x%llx\n", count); \
36867 } \
36868 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36869diff -urNp linux-2.6.32.46/drivers/scsi/scsi_transport_fc.c linux-2.6.32.46/drivers/scsi/scsi_transport_fc.c
36870--- linux-2.6.32.46/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
36871+++ linux-2.6.32.46/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
36872@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
36873 * Netlink Infrastructure
36874 */
36875
36876-static atomic_t fc_event_seq;
36877+static atomic_unchecked_t fc_event_seq;
36878
36879 /**
36880 * fc_get_event_number - Obtain the next sequential FC event number
36881@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
36882 u32
36883 fc_get_event_number(void)
36884 {
36885- return atomic_add_return(1, &fc_event_seq);
36886+ return atomic_add_return_unchecked(1, &fc_event_seq);
36887 }
36888 EXPORT_SYMBOL(fc_get_event_number);
36889
36890@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
36891 {
36892 int error;
36893
36894- atomic_set(&fc_event_seq, 0);
36895+ atomic_set_unchecked(&fc_event_seq, 0);
36896
36897 error = transport_class_register(&fc_host_class);
36898 if (error)
36899diff -urNp linux-2.6.32.46/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.46/drivers/scsi/scsi_transport_iscsi.c
36900--- linux-2.6.32.46/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
36901+++ linux-2.6.32.46/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
36902@@ -81,7 +81,7 @@ struct iscsi_internal {
36903 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36904 };
36905
36906-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36907+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36908 static struct workqueue_struct *iscsi_eh_timer_workq;
36909
36910 /*
36911@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
36912 int err;
36913
36914 ihost = shost->shost_data;
36915- session->sid = atomic_add_return(1, &iscsi_session_nr);
36916+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36917
36918 if (id == ISCSI_MAX_TARGET) {
36919 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36920@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
36921 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36922 ISCSI_TRANSPORT_VERSION);
36923
36924- atomic_set(&iscsi_session_nr, 0);
36925+ atomic_set_unchecked(&iscsi_session_nr, 0);
36926
36927 err = class_register(&iscsi_transport_class);
36928 if (err)
36929diff -urNp linux-2.6.32.46/drivers/scsi/scsi_transport_srp.c linux-2.6.32.46/drivers/scsi/scsi_transport_srp.c
36930--- linux-2.6.32.46/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
36931+++ linux-2.6.32.46/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
36932@@ -33,7 +33,7 @@
36933 #include "scsi_transport_srp_internal.h"
36934
36935 struct srp_host_attrs {
36936- atomic_t next_port_id;
36937+ atomic_unchecked_t next_port_id;
36938 };
36939 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36940
36941@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
36942 struct Scsi_Host *shost = dev_to_shost(dev);
36943 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36944
36945- atomic_set(&srp_host->next_port_id, 0);
36946+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36947 return 0;
36948 }
36949
36950@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
36951 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36952 rport->roles = ids->roles;
36953
36954- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36955+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36956 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36957
36958 transport_setup_device(&rport->dev);
36959diff -urNp linux-2.6.32.46/drivers/scsi/sg.c linux-2.6.32.46/drivers/scsi/sg.c
36960--- linux-2.6.32.46/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
36961+++ linux-2.6.32.46/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
36962@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
36963 const struct file_operations * fops;
36964 };
36965
36966-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36967+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36968 {"allow_dio", &adio_fops},
36969 {"debug", &debug_fops},
36970 {"def_reserved_size", &dressz_fops},
36971@@ -2307,7 +2307,7 @@ sg_proc_init(void)
36972 {
36973 int k, mask;
36974 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36975- struct sg_proc_leaf * leaf;
36976+ const struct sg_proc_leaf * leaf;
36977
36978 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36979 if (!sg_proc_sgp)
36980diff -urNp linux-2.6.32.46/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.46/drivers/scsi/sym53c8xx_2/sym_glue.c
36981--- linux-2.6.32.46/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
36982+++ linux-2.6.32.46/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
36983@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
36984 int do_iounmap = 0;
36985 int do_disable_device = 1;
36986
36987+ pax_track_stack();
36988+
36989 memset(&sym_dev, 0, sizeof(sym_dev));
36990 memset(&nvram, 0, sizeof(nvram));
36991 sym_dev.pdev = pdev;
36992diff -urNp linux-2.6.32.46/drivers/serial/kgdboc.c linux-2.6.32.46/drivers/serial/kgdboc.c
36993--- linux-2.6.32.46/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
36994+++ linux-2.6.32.46/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
36995@@ -18,7 +18,7 @@
36996
36997 #define MAX_CONFIG_LEN 40
36998
36999-static struct kgdb_io kgdboc_io_ops;
37000+static const struct kgdb_io kgdboc_io_ops;
37001
37002 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37003 static int configured = -1;
37004@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37005 module_put(THIS_MODULE);
37006 }
37007
37008-static struct kgdb_io kgdboc_io_ops = {
37009+static const struct kgdb_io kgdboc_io_ops = {
37010 .name = "kgdboc",
37011 .read_char = kgdboc_get_char,
37012 .write_char = kgdboc_put_char,
37013diff -urNp linux-2.6.32.46/drivers/spi/spi.c linux-2.6.32.46/drivers/spi/spi.c
37014--- linux-2.6.32.46/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37015+++ linux-2.6.32.46/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37016@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37017 EXPORT_SYMBOL_GPL(spi_sync);
37018
37019 /* portable code must never pass more than 32 bytes */
37020-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37021+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37022
37023 static u8 *buf;
37024
37025diff -urNp linux-2.6.32.46/drivers/staging/android/binder.c linux-2.6.32.46/drivers/staging/android/binder.c
37026--- linux-2.6.32.46/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37027+++ linux-2.6.32.46/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37028@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37029 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37030 }
37031
37032-static struct vm_operations_struct binder_vm_ops = {
37033+static const struct vm_operations_struct binder_vm_ops = {
37034 .open = binder_vma_open,
37035 .close = binder_vma_close,
37036 };
37037diff -urNp linux-2.6.32.46/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.46/drivers/staging/b3dfg/b3dfg.c
37038--- linux-2.6.32.46/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37039+++ linux-2.6.32.46/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37040@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37041 return VM_FAULT_NOPAGE;
37042 }
37043
37044-static struct vm_operations_struct b3dfg_vm_ops = {
37045+static const struct vm_operations_struct b3dfg_vm_ops = {
37046 .fault = b3dfg_vma_fault,
37047 };
37048
37049@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37050 return r;
37051 }
37052
37053-static struct file_operations b3dfg_fops = {
37054+static const struct file_operations b3dfg_fops = {
37055 .owner = THIS_MODULE,
37056 .open = b3dfg_open,
37057 .release = b3dfg_release,
37058diff -urNp linux-2.6.32.46/drivers/staging/comedi/comedi_fops.c linux-2.6.32.46/drivers/staging/comedi/comedi_fops.c
37059--- linux-2.6.32.46/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37060+++ linux-2.6.32.46/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37061@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37062 mutex_unlock(&dev->mutex);
37063 }
37064
37065-static struct vm_operations_struct comedi_vm_ops = {
37066+static const struct vm_operations_struct comedi_vm_ops = {
37067 .close = comedi_unmap,
37068 };
37069
37070diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.46/drivers/staging/dream/qdsp5/adsp_driver.c
37071--- linux-2.6.32.46/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37072+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37073@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37074 static dev_t adsp_devno;
37075 static struct class *adsp_class;
37076
37077-static struct file_operations adsp_fops = {
37078+static const struct file_operations adsp_fops = {
37079 .owner = THIS_MODULE,
37080 .open = adsp_open,
37081 .unlocked_ioctl = adsp_ioctl,
37082diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_aac.c
37083--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37084+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37085@@ -1022,7 +1022,7 @@ done:
37086 return rc;
37087 }
37088
37089-static struct file_operations audio_aac_fops = {
37090+static const struct file_operations audio_aac_fops = {
37091 .owner = THIS_MODULE,
37092 .open = audio_open,
37093 .release = audio_release,
37094diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_amrnb.c
37095--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37096+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37097@@ -833,7 +833,7 @@ done:
37098 return rc;
37099 }
37100
37101-static struct file_operations audio_amrnb_fops = {
37102+static const struct file_operations audio_amrnb_fops = {
37103 .owner = THIS_MODULE,
37104 .open = audamrnb_open,
37105 .release = audamrnb_release,
37106diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_evrc.c
37107--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37108+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37109@@ -805,7 +805,7 @@ dma_fail:
37110 return rc;
37111 }
37112
37113-static struct file_operations audio_evrc_fops = {
37114+static const struct file_operations audio_evrc_fops = {
37115 .owner = THIS_MODULE,
37116 .open = audevrc_open,
37117 .release = audevrc_release,
37118diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_in.c
37119--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37120+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37121@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37122 return 0;
37123 }
37124
37125-static struct file_operations audio_fops = {
37126+static const struct file_operations audio_fops = {
37127 .owner = THIS_MODULE,
37128 .open = audio_in_open,
37129 .release = audio_in_release,
37130@@ -922,7 +922,7 @@ static struct file_operations audio_fops
37131 .unlocked_ioctl = audio_in_ioctl,
37132 };
37133
37134-static struct file_operations audpre_fops = {
37135+static const struct file_operations audpre_fops = {
37136 .owner = THIS_MODULE,
37137 .open = audpre_open,
37138 .unlocked_ioctl = audpre_ioctl,
37139diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_mp3.c
37140--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37141+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37142@@ -941,7 +941,7 @@ done:
37143 return rc;
37144 }
37145
37146-static struct file_operations audio_mp3_fops = {
37147+static const struct file_operations audio_mp3_fops = {
37148 .owner = THIS_MODULE,
37149 .open = audio_open,
37150 .release = audio_release,
37151diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_out.c
37152--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37153+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37154@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37155 return 0;
37156 }
37157
37158-static struct file_operations audio_fops = {
37159+static const struct file_operations audio_fops = {
37160 .owner = THIS_MODULE,
37161 .open = audio_open,
37162 .release = audio_release,
37163@@ -819,7 +819,7 @@ static struct file_operations audio_fops
37164 .unlocked_ioctl = audio_ioctl,
37165 };
37166
37167-static struct file_operations audpp_fops = {
37168+static const struct file_operations audpp_fops = {
37169 .owner = THIS_MODULE,
37170 .open = audpp_open,
37171 .unlocked_ioctl = audpp_ioctl,
37172diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_qcelp.c
37173--- linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37174+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37175@@ -816,7 +816,7 @@ err:
37176 return rc;
37177 }
37178
37179-static struct file_operations audio_qcelp_fops = {
37180+static const struct file_operations audio_qcelp_fops = {
37181 .owner = THIS_MODULE,
37182 .open = audqcelp_open,
37183 .release = audqcelp_release,
37184diff -urNp linux-2.6.32.46/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.46/drivers/staging/dream/qdsp5/snd.c
37185--- linux-2.6.32.46/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37186+++ linux-2.6.32.46/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37187@@ -242,7 +242,7 @@ err:
37188 return rc;
37189 }
37190
37191-static struct file_operations snd_fops = {
37192+static const struct file_operations snd_fops = {
37193 .owner = THIS_MODULE,
37194 .open = snd_open,
37195 .release = snd_release,
37196diff -urNp linux-2.6.32.46/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.46/drivers/staging/dream/smd/smd_qmi.c
37197--- linux-2.6.32.46/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37198+++ linux-2.6.32.46/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37199@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37200 return 0;
37201 }
37202
37203-static struct file_operations qmi_fops = {
37204+static const struct file_operations qmi_fops = {
37205 .owner = THIS_MODULE,
37206 .read = qmi_read,
37207 .write = qmi_write,
37208diff -urNp linux-2.6.32.46/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.46/drivers/staging/dream/smd/smd_rpcrouter_device.c
37209--- linux-2.6.32.46/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37210+++ linux-2.6.32.46/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37211@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37212 return rc;
37213 }
37214
37215-static struct file_operations rpcrouter_server_fops = {
37216+static const struct file_operations rpcrouter_server_fops = {
37217 .owner = THIS_MODULE,
37218 .open = rpcrouter_open,
37219 .release = rpcrouter_release,
37220@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37221 .unlocked_ioctl = rpcrouter_ioctl,
37222 };
37223
37224-static struct file_operations rpcrouter_router_fops = {
37225+static const struct file_operations rpcrouter_router_fops = {
37226 .owner = THIS_MODULE,
37227 .open = rpcrouter_open,
37228 .release = rpcrouter_release,
37229diff -urNp linux-2.6.32.46/drivers/staging/dst/dcore.c linux-2.6.32.46/drivers/staging/dst/dcore.c
37230--- linux-2.6.32.46/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37231+++ linux-2.6.32.46/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37232@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37233 return 0;
37234 }
37235
37236-static struct block_device_operations dst_blk_ops = {
37237+static const struct block_device_operations dst_blk_ops = {
37238 .open = dst_bdev_open,
37239 .release = dst_bdev_release,
37240 .owner = THIS_MODULE,
37241@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37242 n->size = ctl->size;
37243
37244 atomic_set(&n->refcnt, 1);
37245- atomic_long_set(&n->gen, 0);
37246+ atomic_long_set_unchecked(&n->gen, 0);
37247 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37248
37249 err = dst_node_sysfs_init(n);
37250diff -urNp linux-2.6.32.46/drivers/staging/dst/trans.c linux-2.6.32.46/drivers/staging/dst/trans.c
37251--- linux-2.6.32.46/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37252+++ linux-2.6.32.46/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37253@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37254 t->error = 0;
37255 t->retries = 0;
37256 atomic_set(&t->refcnt, 1);
37257- t->gen = atomic_long_inc_return(&n->gen);
37258+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
37259
37260 t->enc = bio_data_dir(bio);
37261 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37262diff -urNp linux-2.6.32.46/drivers/staging/et131x/et1310_tx.c linux-2.6.32.46/drivers/staging/et131x/et1310_tx.c
37263--- linux-2.6.32.46/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37264+++ linux-2.6.32.46/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37265@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37266 struct net_device_stats *stats = &etdev->net_stats;
37267
37268 if (pMpTcb->Flags & fMP_DEST_BROAD)
37269- atomic_inc(&etdev->Stats.brdcstxmt);
37270+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37271 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37272- atomic_inc(&etdev->Stats.multixmt);
37273+ atomic_inc_unchecked(&etdev->Stats.multixmt);
37274 else
37275- atomic_inc(&etdev->Stats.unixmt);
37276+ atomic_inc_unchecked(&etdev->Stats.unixmt);
37277
37278 if (pMpTcb->Packet) {
37279 stats->tx_bytes += pMpTcb->Packet->len;
37280diff -urNp linux-2.6.32.46/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.46/drivers/staging/et131x/et131x_adapter.h
37281--- linux-2.6.32.46/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37282+++ linux-2.6.32.46/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37283@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37284 * operations
37285 */
37286 u32 unircv; /* # multicast packets received */
37287- atomic_t unixmt; /* # multicast packets for Tx */
37288+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37289 u32 multircv; /* # multicast packets received */
37290- atomic_t multixmt; /* # multicast packets for Tx */
37291+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37292 u32 brdcstrcv; /* # broadcast packets received */
37293- atomic_t brdcstxmt; /* # broadcast packets for Tx */
37294+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37295 u32 norcvbuf; /* # Rx packets discarded */
37296 u32 noxmtbuf; /* # Tx packets discarded */
37297
37298diff -urNp linux-2.6.32.46/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.46/drivers/staging/go7007/go7007-v4l2.c
37299--- linux-2.6.32.46/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37300+++ linux-2.6.32.46/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37301@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37302 return 0;
37303 }
37304
37305-static struct vm_operations_struct go7007_vm_ops = {
37306+static const struct vm_operations_struct go7007_vm_ops = {
37307 .open = go7007_vm_open,
37308 .close = go7007_vm_close,
37309 .fault = go7007_vm_fault,
37310diff -urNp linux-2.6.32.46/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.46/drivers/staging/hv/blkvsc_drv.c
37311--- linux-2.6.32.46/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37312+++ linux-2.6.32.46/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37313@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37314 /* The one and only one */
37315 static struct blkvsc_driver_context g_blkvsc_drv;
37316
37317-static struct block_device_operations block_ops = {
37318+static const struct block_device_operations block_ops = {
37319 .owner = THIS_MODULE,
37320 .open = blkvsc_open,
37321 .release = blkvsc_release,
37322diff -urNp linux-2.6.32.46/drivers/staging/hv/Channel.c linux-2.6.32.46/drivers/staging/hv/Channel.c
37323--- linux-2.6.32.46/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37324+++ linux-2.6.32.46/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37325@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37326
37327 DPRINT_ENTER(VMBUS);
37328
37329- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37330- atomic_inc(&gVmbusConnection.NextGpadlHandle);
37331+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37332+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37333
37334 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37335 ASSERT(msgInfo != NULL);
37336diff -urNp linux-2.6.32.46/drivers/staging/hv/Hv.c linux-2.6.32.46/drivers/staging/hv/Hv.c
37337--- linux-2.6.32.46/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37338+++ linux-2.6.32.46/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37339@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37340 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37341 u32 outputAddressHi = outputAddress >> 32;
37342 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37343- volatile void *hypercallPage = gHvContext.HypercallPage;
37344+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37345
37346 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37347 Control, Input, Output);
37348diff -urNp linux-2.6.32.46/drivers/staging/hv/VmbusApi.h linux-2.6.32.46/drivers/staging/hv/VmbusApi.h
37349--- linux-2.6.32.46/drivers/staging/hv/VmbusApi.h 2011-03-27 14:31:47.000000000 -0400
37350+++ linux-2.6.32.46/drivers/staging/hv/VmbusApi.h 2011-08-29 22:32:57.000000000 -0400
37351@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
37352 u32 *GpadlHandle);
37353 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
37354 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
37355-};
37356+} __no_const;
37357
37358 /* Base driver object */
37359 struct hv_driver {
37360diff -urNp linux-2.6.32.46/drivers/staging/hv/vmbus_drv.c linux-2.6.32.46/drivers/staging/hv/vmbus_drv.c
37361--- linux-2.6.32.46/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37362+++ linux-2.6.32.46/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37363@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37364 to_device_context(root_device_obj);
37365 struct device_context *child_device_ctx =
37366 to_device_context(child_device_obj);
37367- static atomic_t device_num = ATOMIC_INIT(0);
37368+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37369
37370 DPRINT_ENTER(VMBUS_DRV);
37371
37372@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37373
37374 /* Set the device name. Otherwise, device_register() will fail. */
37375 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37376- atomic_inc_return(&device_num));
37377+ atomic_inc_return_unchecked(&device_num));
37378
37379 /* The new device belongs to this bus */
37380 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37381diff -urNp linux-2.6.32.46/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.46/drivers/staging/hv/VmbusPrivate.h
37382--- linux-2.6.32.46/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37383+++ linux-2.6.32.46/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37384@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37385 struct VMBUS_CONNECTION {
37386 enum VMBUS_CONNECT_STATE ConnectState;
37387
37388- atomic_t NextGpadlHandle;
37389+ atomic_unchecked_t NextGpadlHandle;
37390
37391 /*
37392 * Represents channel interrupts. Each bit position represents a
37393diff -urNp linux-2.6.32.46/drivers/staging/iio/ring_generic.h linux-2.6.32.46/drivers/staging/iio/ring_generic.h
37394--- linux-2.6.32.46/drivers/staging/iio/ring_generic.h 2011-03-27 14:31:47.000000000 -0400
37395+++ linux-2.6.32.46/drivers/staging/iio/ring_generic.h 2011-08-23 20:24:26.000000000 -0400
37396@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
37397
37398 int (*is_enabled)(struct iio_ring_buffer *ring);
37399 int (*enable)(struct iio_ring_buffer *ring);
37400-};
37401+} __no_const;
37402
37403 /**
37404 * struct iio_ring_buffer - general ring buffer structure
37405diff -urNp linux-2.6.32.46/drivers/staging/octeon/ethernet.c linux-2.6.32.46/drivers/staging/octeon/ethernet.c
37406--- linux-2.6.32.46/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37407+++ linux-2.6.32.46/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37408@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37409 * since the RX tasklet also increments it.
37410 */
37411 #ifdef CONFIG_64BIT
37412- atomic64_add(rx_status.dropped_packets,
37413- (atomic64_t *)&priv->stats.rx_dropped);
37414+ atomic64_add_unchecked(rx_status.dropped_packets,
37415+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37416 #else
37417- atomic_add(rx_status.dropped_packets,
37418- (atomic_t *)&priv->stats.rx_dropped);
37419+ atomic_add_unchecked(rx_status.dropped_packets,
37420+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37421 #endif
37422 }
37423
37424diff -urNp linux-2.6.32.46/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.46/drivers/staging/octeon/ethernet-rx.c
37425--- linux-2.6.32.46/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37426+++ linux-2.6.32.46/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37427@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37428 /* Increment RX stats for virtual ports */
37429 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37430 #ifdef CONFIG_64BIT
37431- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37432- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37433+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37434+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37435 #else
37436- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37437- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37438+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37439+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37440 #endif
37441 }
37442 netif_receive_skb(skb);
37443@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37444 dev->name);
37445 */
37446 #ifdef CONFIG_64BIT
37447- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37448+			atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37449 #else
37450- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37451+			atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37452 #endif
37453 dev_kfree_skb_irq(skb);
37454 }
37455diff -urNp linux-2.6.32.46/drivers/staging/panel/panel.c linux-2.6.32.46/drivers/staging/panel/panel.c
37456--- linux-2.6.32.46/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37457+++ linux-2.6.32.46/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37458@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37459 return 0;
37460 }
37461
37462-static struct file_operations lcd_fops = {
37463+static const struct file_operations lcd_fops = {
37464 .write = lcd_write,
37465 .open = lcd_open,
37466 .release = lcd_release,
37467@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37468 return 0;
37469 }
37470
37471-static struct file_operations keypad_fops = {
37472+static const struct file_operations keypad_fops = {
37473 .read = keypad_read, /* read */
37474 .open = keypad_open, /* open */
37475 .release = keypad_release, /* close */
37476diff -urNp linux-2.6.32.46/drivers/staging/phison/phison.c linux-2.6.32.46/drivers/staging/phison/phison.c
37477--- linux-2.6.32.46/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
37478+++ linux-2.6.32.46/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
37479@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
37480 ATA_BMDMA_SHT(DRV_NAME),
37481 };
37482
37483-static struct ata_port_operations phison_ops = {
37484+static const struct ata_port_operations phison_ops = {
37485 .inherits = &ata_bmdma_port_ops,
37486 .prereset = phison_pre_reset,
37487 };
37488diff -urNp linux-2.6.32.46/drivers/staging/poch/poch.c linux-2.6.32.46/drivers/staging/poch/poch.c
37489--- linux-2.6.32.46/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
37490+++ linux-2.6.32.46/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
37491@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
37492 return 0;
37493 }
37494
37495-static struct file_operations poch_fops = {
37496+static const struct file_operations poch_fops = {
37497 .owner = THIS_MODULE,
37498 .open = poch_open,
37499 .release = poch_release,
37500diff -urNp linux-2.6.32.46/drivers/staging/pohmelfs/inode.c linux-2.6.32.46/drivers/staging/pohmelfs/inode.c
37501--- linux-2.6.32.46/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37502+++ linux-2.6.32.46/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
37503@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
37504 mutex_init(&psb->mcache_lock);
37505 psb->mcache_root = RB_ROOT;
37506 psb->mcache_timeout = msecs_to_jiffies(5000);
37507- atomic_long_set(&psb->mcache_gen, 0);
37508+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
37509
37510 psb->trans_max_pages = 100;
37511
37512@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
37513 INIT_LIST_HEAD(&psb->crypto_ready_list);
37514 INIT_LIST_HEAD(&psb->crypto_active_list);
37515
37516- atomic_set(&psb->trans_gen, 1);
37517+ atomic_set_unchecked(&psb->trans_gen, 1);
37518 atomic_long_set(&psb->total_inodes, 0);
37519
37520 mutex_init(&psb->state_lock);
37521diff -urNp linux-2.6.32.46/drivers/staging/pohmelfs/mcache.c linux-2.6.32.46/drivers/staging/pohmelfs/mcache.c
37522--- linux-2.6.32.46/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
37523+++ linux-2.6.32.46/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
37524@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
37525 m->data = data;
37526 m->start = start;
37527 m->size = size;
37528- m->gen = atomic_long_inc_return(&psb->mcache_gen);
37529+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37530
37531 mutex_lock(&psb->mcache_lock);
37532 err = pohmelfs_mcache_insert(psb, m);
37533diff -urNp linux-2.6.32.46/drivers/staging/pohmelfs/netfs.h linux-2.6.32.46/drivers/staging/pohmelfs/netfs.h
37534--- linux-2.6.32.46/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
37535+++ linux-2.6.32.46/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
37536@@ -570,14 +570,14 @@ struct pohmelfs_config;
37537 struct pohmelfs_sb {
37538 struct rb_root mcache_root;
37539 struct mutex mcache_lock;
37540- atomic_long_t mcache_gen;
37541+ atomic_long_unchecked_t mcache_gen;
37542 unsigned long mcache_timeout;
37543
37544 unsigned int idx;
37545
37546 unsigned int trans_retries;
37547
37548- atomic_t trans_gen;
37549+ atomic_unchecked_t trans_gen;
37550
37551 unsigned int crypto_attached_size;
37552 unsigned int crypto_align_size;
37553diff -urNp linux-2.6.32.46/drivers/staging/pohmelfs/trans.c linux-2.6.32.46/drivers/staging/pohmelfs/trans.c
37554--- linux-2.6.32.46/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
37555+++ linux-2.6.32.46/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
37556@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
37557 int err;
37558 struct netfs_cmd *cmd = t->iovec.iov_base;
37559
37560- t->gen = atomic_inc_return(&psb->trans_gen);
37561+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37562
37563 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37564 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37565diff -urNp linux-2.6.32.46/drivers/staging/sep/sep_driver.c linux-2.6.32.46/drivers/staging/sep/sep_driver.c
37566--- linux-2.6.32.46/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
37567+++ linux-2.6.32.46/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
37568@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
37569 static dev_t sep_devno;
37570
37571 /* the files operations structure of the driver */
37572-static struct file_operations sep_file_operations = {
37573+static const struct file_operations sep_file_operations = {
37574 .owner = THIS_MODULE,
37575 .ioctl = sep_ioctl,
37576 .poll = sep_poll,
37577diff -urNp linux-2.6.32.46/drivers/staging/usbip/usbip_common.h linux-2.6.32.46/drivers/staging/usbip/usbip_common.h
37578--- linux-2.6.32.46/drivers/staging/usbip/usbip_common.h 2011-04-17 17:00:52.000000000 -0400
37579+++ linux-2.6.32.46/drivers/staging/usbip/usbip_common.h 2011-08-23 20:24:26.000000000 -0400
37580@@ -374,7 +374,7 @@ struct usbip_device {
37581 void (*shutdown)(struct usbip_device *);
37582 void (*reset)(struct usbip_device *);
37583 void (*unusable)(struct usbip_device *);
37584- } eh_ops;
37585+ } __no_const eh_ops;
37586 };
37587
37588
37589diff -urNp linux-2.6.32.46/drivers/staging/usbip/vhci.h linux-2.6.32.46/drivers/staging/usbip/vhci.h
37590--- linux-2.6.32.46/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
37591+++ linux-2.6.32.46/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
37592@@ -92,7 +92,7 @@ struct vhci_hcd {
37593 unsigned resuming:1;
37594 unsigned long re_timeout;
37595
37596- atomic_t seqnum;
37597+ atomic_unchecked_t seqnum;
37598
37599 /*
37600 * NOTE:
37601diff -urNp linux-2.6.32.46/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.46/drivers/staging/usbip/vhci_hcd.c
37602--- linux-2.6.32.46/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
37603+++ linux-2.6.32.46/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
37604@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
37605 return;
37606 }
37607
37608- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37609+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37610 if (priv->seqnum == 0xffff)
37611 usbip_uinfo("seqnum max\n");
37612
37613@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
37614 return -ENOMEM;
37615 }
37616
37617- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37618+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37619 if (unlink->seqnum == 0xffff)
37620 usbip_uinfo("seqnum max\n");
37621
37622@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
37623 vdev->rhport = rhport;
37624 }
37625
37626- atomic_set(&vhci->seqnum, 0);
37627+ atomic_set_unchecked(&vhci->seqnum, 0);
37628 spin_lock_init(&vhci->lock);
37629
37630
37631diff -urNp linux-2.6.32.46/drivers/staging/usbip/vhci_rx.c linux-2.6.32.46/drivers/staging/usbip/vhci_rx.c
37632--- linux-2.6.32.46/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
37633+++ linux-2.6.32.46/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
37634@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
37635 usbip_uerr("cannot find a urb of seqnum %u\n",
37636 pdu->base.seqnum);
37637 usbip_uinfo("max seqnum %d\n",
37638- atomic_read(&the_controller->seqnum));
37639+ atomic_read_unchecked(&the_controller->seqnum));
37640 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37641 return;
37642 }
37643diff -urNp linux-2.6.32.46/drivers/staging/vme/devices/vme_user.c linux-2.6.32.46/drivers/staging/vme/devices/vme_user.c
37644--- linux-2.6.32.46/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
37645+++ linux-2.6.32.46/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
37646@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
37647 static int __init vme_user_probe(struct device *, int, int);
37648 static int __exit vme_user_remove(struct device *, int, int);
37649
37650-static struct file_operations vme_user_fops = {
37651+static const struct file_operations vme_user_fops = {
37652 .open = vme_user_open,
37653 .release = vme_user_release,
37654 .read = vme_user_read,
37655diff -urNp linux-2.6.32.46/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.32.46/drivers/staging/wlan-ng/hfa384x_usb.c
37656--- linux-2.6.32.46/drivers/staging/wlan-ng/hfa384x_usb.c 2011-03-27 14:31:47.000000000 -0400
37657+++ linux-2.6.32.46/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-23 20:24:26.000000000 -0400
37658@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hf
37659
37660 struct usbctlx_completor {
37661 int (*complete) (struct usbctlx_completor *);
37662-};
37663+} __no_const;
37664 typedef struct usbctlx_completor usbctlx_completor_t;
37665
37666 static int
37667diff -urNp linux-2.6.32.46/drivers/telephony/ixj.c linux-2.6.32.46/drivers/telephony/ixj.c
37668--- linux-2.6.32.46/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
37669+++ linux-2.6.32.46/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
37670@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37671 bool mContinue;
37672 char *pIn, *pOut;
37673
37674+ pax_track_stack();
37675+
37676 if (!SCI_Prepare(j))
37677 return 0;
37678
37679diff -urNp linux-2.6.32.46/drivers/uio/uio.c linux-2.6.32.46/drivers/uio/uio.c
37680--- linux-2.6.32.46/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
37681+++ linux-2.6.32.46/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
37682@@ -23,6 +23,7 @@
37683 #include <linux/string.h>
37684 #include <linux/kobject.h>
37685 #include <linux/uio_driver.h>
37686+#include <asm/local.h>
37687
37688 #define UIO_MAX_DEVICES 255
37689
37690@@ -30,10 +31,10 @@ struct uio_device {
37691 struct module *owner;
37692 struct device *dev;
37693 int minor;
37694- atomic_t event;
37695+ atomic_unchecked_t event;
37696 struct fasync_struct *async_queue;
37697 wait_queue_head_t wait;
37698- int vma_count;
37699+ local_t vma_count;
37700 struct uio_info *info;
37701 struct kobject *map_dir;
37702 struct kobject *portio_dir;
37703@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
37704 return entry->show(mem, buf);
37705 }
37706
37707-static struct sysfs_ops map_sysfs_ops = {
37708+static const struct sysfs_ops map_sysfs_ops = {
37709 .show = map_type_show,
37710 };
37711
37712@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
37713 return entry->show(port, buf);
37714 }
37715
37716-static struct sysfs_ops portio_sysfs_ops = {
37717+static const struct sysfs_ops portio_sysfs_ops = {
37718 .show = portio_type_show,
37719 };
37720
37721@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
37722 struct uio_device *idev = dev_get_drvdata(dev);
37723 if (idev)
37724 return sprintf(buf, "%u\n",
37725- (unsigned int)atomic_read(&idev->event));
37726+ (unsigned int)atomic_read_unchecked(&idev->event));
37727 else
37728 return -ENODEV;
37729 }
37730@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
37731 {
37732 struct uio_device *idev = info->uio_dev;
37733
37734- atomic_inc(&idev->event);
37735+ atomic_inc_unchecked(&idev->event);
37736 wake_up_interruptible(&idev->wait);
37737 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37738 }
37739@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
37740 }
37741
37742 listener->dev = idev;
37743- listener->event_count = atomic_read(&idev->event);
37744+ listener->event_count = atomic_read_unchecked(&idev->event);
37745 filep->private_data = listener;
37746
37747 if (idev->info->open) {
37748@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
37749 return -EIO;
37750
37751 poll_wait(filep, &idev->wait, wait);
37752- if (listener->event_count != atomic_read(&idev->event))
37753+ if (listener->event_count != atomic_read_unchecked(&idev->event))
37754 return POLLIN | POLLRDNORM;
37755 return 0;
37756 }
37757@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
37758 do {
37759 set_current_state(TASK_INTERRUPTIBLE);
37760
37761- event_count = atomic_read(&idev->event);
37762+ event_count = atomic_read_unchecked(&idev->event);
37763 if (event_count != listener->event_count) {
37764 if (copy_to_user(buf, &event_count, count))
37765 retval = -EFAULT;
37766@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
37767 static void uio_vma_open(struct vm_area_struct *vma)
37768 {
37769 struct uio_device *idev = vma->vm_private_data;
37770- idev->vma_count++;
37771+ local_inc(&idev->vma_count);
37772 }
37773
37774 static void uio_vma_close(struct vm_area_struct *vma)
37775 {
37776 struct uio_device *idev = vma->vm_private_data;
37777- idev->vma_count--;
37778+ local_dec(&idev->vma_count);
37779 }
37780
37781 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37782@@ -840,7 +841,7 @@ int __uio_register_device(struct module
37783 idev->owner = owner;
37784 idev->info = info;
37785 init_waitqueue_head(&idev->wait);
37786- atomic_set(&idev->event, 0);
37787+ atomic_set_unchecked(&idev->event, 0);
37788
37789 ret = uio_get_minor(idev);
37790 if (ret)
37791diff -urNp linux-2.6.32.46/drivers/usb/atm/usbatm.c linux-2.6.32.46/drivers/usb/atm/usbatm.c
37792--- linux-2.6.32.46/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
37793+++ linux-2.6.32.46/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
37794@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
37795 if (printk_ratelimit())
37796 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37797 __func__, vpi, vci);
37798- atomic_inc(&vcc->stats->rx_err);
37799+ atomic_inc_unchecked(&vcc->stats->rx_err);
37800 return;
37801 }
37802
37803@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
37804 if (length > ATM_MAX_AAL5_PDU) {
37805 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37806 __func__, length, vcc);
37807- atomic_inc(&vcc->stats->rx_err);
37808+ atomic_inc_unchecked(&vcc->stats->rx_err);
37809 goto out;
37810 }
37811
37812@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
37813 if (sarb->len < pdu_length) {
37814 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37815 __func__, pdu_length, sarb->len, vcc);
37816- atomic_inc(&vcc->stats->rx_err);
37817+ atomic_inc_unchecked(&vcc->stats->rx_err);
37818 goto out;
37819 }
37820
37821 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37822 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37823 __func__, vcc);
37824- atomic_inc(&vcc->stats->rx_err);
37825+ atomic_inc_unchecked(&vcc->stats->rx_err);
37826 goto out;
37827 }
37828
37829@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
37830 if (printk_ratelimit())
37831 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37832 __func__, length);
37833- atomic_inc(&vcc->stats->rx_drop);
37834+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37835 goto out;
37836 }
37837
37838@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
37839
37840 vcc->push(vcc, skb);
37841
37842- atomic_inc(&vcc->stats->rx);
37843+ atomic_inc_unchecked(&vcc->stats->rx);
37844 out:
37845 skb_trim(sarb, 0);
37846 }
37847@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
37848 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37849
37850 usbatm_pop(vcc, skb);
37851- atomic_inc(&vcc->stats->tx);
37852+ atomic_inc_unchecked(&vcc->stats->tx);
37853
37854 skb = skb_dequeue(&instance->sndqueue);
37855 }
37856@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
37857 if (!left--)
37858 return sprintf(page,
37859 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37860- atomic_read(&atm_dev->stats.aal5.tx),
37861- atomic_read(&atm_dev->stats.aal5.tx_err),
37862- atomic_read(&atm_dev->stats.aal5.rx),
37863- atomic_read(&atm_dev->stats.aal5.rx_err),
37864- atomic_read(&atm_dev->stats.aal5.rx_drop));
37865+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37866+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37867+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37868+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37869+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37870
37871 if (!left--) {
37872 if (instance->disconnected)
37873diff -urNp linux-2.6.32.46/drivers/usb/class/cdc-wdm.c linux-2.6.32.46/drivers/usb/class/cdc-wdm.c
37874--- linux-2.6.32.46/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
37875+++ linux-2.6.32.46/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
37876@@ -314,7 +314,7 @@ static ssize_t wdm_write
37877 if (r < 0)
37878 goto outnp;
37879
37880- if (!file->f_flags && O_NONBLOCK)
37881+ if (!(file->f_flags & O_NONBLOCK))
37882 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
37883 &desc->flags));
37884 else
37885diff -urNp linux-2.6.32.46/drivers/usb/core/hcd.c linux-2.6.32.46/drivers/usb/core/hcd.c
37886--- linux-2.6.32.46/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
37887+++ linux-2.6.32.46/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
37888@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
37889
37890 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37891
37892-struct usb_mon_operations *mon_ops;
37893+const struct usb_mon_operations *mon_ops;
37894
37895 /*
37896 * The registration is unlocked.
37897@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
37898 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
37899 */
37900
37901-int usb_mon_register (struct usb_mon_operations *ops)
37902+int usb_mon_register (const struct usb_mon_operations *ops)
37903 {
37904
37905 if (mon_ops)
37906diff -urNp linux-2.6.32.46/drivers/usb/core/hcd.h linux-2.6.32.46/drivers/usb/core/hcd.h
37907--- linux-2.6.32.46/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
37908+++ linux-2.6.32.46/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
37909@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
37910 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
37911
37912 struct usb_mon_operations {
37913- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
37914- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37915- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37916+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
37917+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
37918+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
37919 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
37920 };
37921
37922-extern struct usb_mon_operations *mon_ops;
37923+extern const struct usb_mon_operations *mon_ops;
37924
37925 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
37926 {
37927@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
37928 (*mon_ops->urb_complete)(bus, urb, status);
37929 }
37930
37931-int usb_mon_register(struct usb_mon_operations *ops);
37932+int usb_mon_register(const struct usb_mon_operations *ops);
37933 void usb_mon_deregister(void);
37934
37935 #else
37936diff -urNp linux-2.6.32.46/drivers/usb/core/message.c linux-2.6.32.46/drivers/usb/core/message.c
37937--- linux-2.6.32.46/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
37938+++ linux-2.6.32.46/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
37939@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
37940 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37941 if (buf) {
37942 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37943- if (len > 0) {
37944- smallbuf = kmalloc(++len, GFP_NOIO);
37945+ if (len++ > 0) {
37946+ smallbuf = kmalloc(len, GFP_NOIO);
37947 if (!smallbuf)
37948 return buf;
37949 memcpy(smallbuf, buf, len);
37950diff -urNp linux-2.6.32.46/drivers/usb/misc/appledisplay.c linux-2.6.32.46/drivers/usb/misc/appledisplay.c
37951--- linux-2.6.32.46/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
37952+++ linux-2.6.32.46/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
37953@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
37954 return pdata->msgdata[1];
37955 }
37956
37957-static struct backlight_ops appledisplay_bl_data = {
37958+static const struct backlight_ops appledisplay_bl_data = {
37959 .get_brightness = appledisplay_bl_get_brightness,
37960 .update_status = appledisplay_bl_update_status,
37961 };
37962diff -urNp linux-2.6.32.46/drivers/usb/mon/mon_main.c linux-2.6.32.46/drivers/usb/mon/mon_main.c
37963--- linux-2.6.32.46/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
37964+++ linux-2.6.32.46/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
37965@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
37966 /*
37967 * Ops
37968 */
37969-static struct usb_mon_operations mon_ops_0 = {
37970+static const struct usb_mon_operations mon_ops_0 = {
37971 .urb_submit = mon_submit,
37972 .urb_submit_error = mon_submit_error,
37973 .urb_complete = mon_complete,
37974diff -urNp linux-2.6.32.46/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.46/drivers/usb/wusbcore/wa-hc.h
37975--- linux-2.6.32.46/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
37976+++ linux-2.6.32.46/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
37977@@ -192,7 +192,7 @@ struct wahc {
37978 struct list_head xfer_delayed_list;
37979 spinlock_t xfer_list_lock;
37980 struct work_struct xfer_work;
37981- atomic_t xfer_id_count;
37982+ atomic_unchecked_t xfer_id_count;
37983 };
37984
37985
37986@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
37987 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37988 spin_lock_init(&wa->xfer_list_lock);
37989 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37990- atomic_set(&wa->xfer_id_count, 1);
37991+ atomic_set_unchecked(&wa->xfer_id_count, 1);
37992 }
37993
37994 /**
37995diff -urNp linux-2.6.32.46/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.46/drivers/usb/wusbcore/wa-xfer.c
37996--- linux-2.6.32.46/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
37997+++ linux-2.6.32.46/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
37998@@ -293,7 +293,7 @@ out:
37999 */
38000 static void wa_xfer_id_init(struct wa_xfer *xfer)
38001 {
38002- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38003+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38004 }
38005
38006 /*
38007diff -urNp linux-2.6.32.46/drivers/uwb/wlp/messages.c linux-2.6.32.46/drivers/uwb/wlp/messages.c
38008--- linux-2.6.32.46/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38009+++ linux-2.6.32.46/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38010@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38011 size_t len = skb->len;
38012 size_t used;
38013 ssize_t result;
38014- struct wlp_nonce enonce, rnonce;
38015+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38016 enum wlp_assc_error assc_err;
38017 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38018 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38019diff -urNp linux-2.6.32.46/drivers/uwb/wlp/sysfs.c linux-2.6.32.46/drivers/uwb/wlp/sysfs.c
38020--- linux-2.6.32.46/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38021+++ linux-2.6.32.46/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38022@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38023 return ret;
38024 }
38025
38026-static
38027-struct sysfs_ops wss_sysfs_ops = {
38028+static const struct sysfs_ops wss_sysfs_ops = {
38029 .show = wlp_wss_attr_show,
38030 .store = wlp_wss_attr_store,
38031 };
38032diff -urNp linux-2.6.32.46/drivers/video/atmel_lcdfb.c linux-2.6.32.46/drivers/video/atmel_lcdfb.c
38033--- linux-2.6.32.46/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38034+++ linux-2.6.32.46/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38035@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38036 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38037 }
38038
38039-static struct backlight_ops atmel_lcdc_bl_ops = {
38040+static const struct backlight_ops atmel_lcdc_bl_ops = {
38041 .update_status = atmel_bl_update_status,
38042 .get_brightness = atmel_bl_get_brightness,
38043 };
38044diff -urNp linux-2.6.32.46/drivers/video/aty/aty128fb.c linux-2.6.32.46/drivers/video/aty/aty128fb.c
38045--- linux-2.6.32.46/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38046+++ linux-2.6.32.46/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38047@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38048 return bd->props.brightness;
38049 }
38050
38051-static struct backlight_ops aty128_bl_data = {
38052+static const struct backlight_ops aty128_bl_data = {
38053 .get_brightness = aty128_bl_get_brightness,
38054 .update_status = aty128_bl_update_status,
38055 };
38056diff -urNp linux-2.6.32.46/drivers/video/aty/atyfb_base.c linux-2.6.32.46/drivers/video/aty/atyfb_base.c
38057--- linux-2.6.32.46/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38058+++ linux-2.6.32.46/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38059@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38060 return bd->props.brightness;
38061 }
38062
38063-static struct backlight_ops aty_bl_data = {
38064+static const struct backlight_ops aty_bl_data = {
38065 .get_brightness = aty_bl_get_brightness,
38066 .update_status = aty_bl_update_status,
38067 };
38068diff -urNp linux-2.6.32.46/drivers/video/aty/radeon_backlight.c linux-2.6.32.46/drivers/video/aty/radeon_backlight.c
38069--- linux-2.6.32.46/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38070+++ linux-2.6.32.46/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38071@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38072 return bd->props.brightness;
38073 }
38074
38075-static struct backlight_ops radeon_bl_data = {
38076+static const struct backlight_ops radeon_bl_data = {
38077 .get_brightness = radeon_bl_get_brightness,
38078 .update_status = radeon_bl_update_status,
38079 };
38080diff -urNp linux-2.6.32.46/drivers/video/backlight/adp5520_bl.c linux-2.6.32.46/drivers/video/backlight/adp5520_bl.c
38081--- linux-2.6.32.46/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38082+++ linux-2.6.32.46/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38083@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38084 return error ? data->current_brightness : reg_val;
38085 }
38086
38087-static struct backlight_ops adp5520_bl_ops = {
38088+static const struct backlight_ops adp5520_bl_ops = {
38089 .update_status = adp5520_bl_update_status,
38090 .get_brightness = adp5520_bl_get_brightness,
38091 };
38092diff -urNp linux-2.6.32.46/drivers/video/backlight/adx_bl.c linux-2.6.32.46/drivers/video/backlight/adx_bl.c
38093--- linux-2.6.32.46/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38094+++ linux-2.6.32.46/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38095@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38096 return 1;
38097 }
38098
38099-static struct backlight_ops adx_backlight_ops = {
38100+static const struct backlight_ops adx_backlight_ops = {
38101 .options = 0,
38102 .update_status = adx_backlight_update_status,
38103 .get_brightness = adx_backlight_get_brightness,
38104diff -urNp linux-2.6.32.46/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.46/drivers/video/backlight/atmel-pwm-bl.c
38105--- linux-2.6.32.46/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38106+++ linux-2.6.32.46/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38107@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38108 return pwm_channel_enable(&pwmbl->pwmc);
38109 }
38110
38111-static struct backlight_ops atmel_pwm_bl_ops = {
38112+static const struct backlight_ops atmel_pwm_bl_ops = {
38113 .get_brightness = atmel_pwm_bl_get_intensity,
38114 .update_status = atmel_pwm_bl_set_intensity,
38115 };
38116diff -urNp linux-2.6.32.46/drivers/video/backlight/backlight.c linux-2.6.32.46/drivers/video/backlight/backlight.c
38117--- linux-2.6.32.46/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38118+++ linux-2.6.32.46/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38119@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38120 * ERR_PTR() or a pointer to the newly allocated device.
38121 */
38122 struct backlight_device *backlight_device_register(const char *name,
38123- struct device *parent, void *devdata, struct backlight_ops *ops)
38124+ struct device *parent, void *devdata, const struct backlight_ops *ops)
38125 {
38126 struct backlight_device *new_bd;
38127 int rc;
38128diff -urNp linux-2.6.32.46/drivers/video/backlight/corgi_lcd.c linux-2.6.32.46/drivers/video/backlight/corgi_lcd.c
38129--- linux-2.6.32.46/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38130+++ linux-2.6.32.46/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38131@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38132 }
38133 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38134
38135-static struct backlight_ops corgi_bl_ops = {
38136+static const struct backlight_ops corgi_bl_ops = {
38137 .get_brightness = corgi_bl_get_intensity,
38138 .update_status = corgi_bl_update_status,
38139 };
38140diff -urNp linux-2.6.32.46/drivers/video/backlight/cr_bllcd.c linux-2.6.32.46/drivers/video/backlight/cr_bllcd.c
38141--- linux-2.6.32.46/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38142+++ linux-2.6.32.46/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38143@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38144 return intensity;
38145 }
38146
38147-static struct backlight_ops cr_backlight_ops = {
38148+static const struct backlight_ops cr_backlight_ops = {
38149 .get_brightness = cr_backlight_get_intensity,
38150 .update_status = cr_backlight_set_intensity,
38151 };
38152diff -urNp linux-2.6.32.46/drivers/video/backlight/da903x_bl.c linux-2.6.32.46/drivers/video/backlight/da903x_bl.c
38153--- linux-2.6.32.46/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38154+++ linux-2.6.32.46/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38155@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38156 return data->current_brightness;
38157 }
38158
38159-static struct backlight_ops da903x_backlight_ops = {
38160+static const struct backlight_ops da903x_backlight_ops = {
38161 .update_status = da903x_backlight_update_status,
38162 .get_brightness = da903x_backlight_get_brightness,
38163 };
38164diff -urNp linux-2.6.32.46/drivers/video/backlight/generic_bl.c linux-2.6.32.46/drivers/video/backlight/generic_bl.c
38165--- linux-2.6.32.46/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38166+++ linux-2.6.32.46/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38167@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38168 }
38169 EXPORT_SYMBOL(corgibl_limit_intensity);
38170
38171-static struct backlight_ops genericbl_ops = {
38172+static const struct backlight_ops genericbl_ops = {
38173 .options = BL_CORE_SUSPENDRESUME,
38174 .get_brightness = genericbl_get_intensity,
38175 .update_status = genericbl_send_intensity,
38176diff -urNp linux-2.6.32.46/drivers/video/backlight/hp680_bl.c linux-2.6.32.46/drivers/video/backlight/hp680_bl.c
38177--- linux-2.6.32.46/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38178+++ linux-2.6.32.46/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38179@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38180 return current_intensity;
38181 }
38182
38183-static struct backlight_ops hp680bl_ops = {
38184+static const struct backlight_ops hp680bl_ops = {
38185 .get_brightness = hp680bl_get_intensity,
38186 .update_status = hp680bl_set_intensity,
38187 };
38188diff -urNp linux-2.6.32.46/drivers/video/backlight/jornada720_bl.c linux-2.6.32.46/drivers/video/backlight/jornada720_bl.c
38189--- linux-2.6.32.46/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38190+++ linux-2.6.32.46/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38191@@ -93,7 +93,7 @@ out:
38192 return ret;
38193 }
38194
38195-static struct backlight_ops jornada_bl_ops = {
38196+static const struct backlight_ops jornada_bl_ops = {
38197 .get_brightness = jornada_bl_get_brightness,
38198 .update_status = jornada_bl_update_status,
38199 .options = BL_CORE_SUSPENDRESUME,
38200diff -urNp linux-2.6.32.46/drivers/video/backlight/kb3886_bl.c linux-2.6.32.46/drivers/video/backlight/kb3886_bl.c
38201--- linux-2.6.32.46/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38202+++ linux-2.6.32.46/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38203@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38204 return kb3886bl_intensity;
38205 }
38206
38207-static struct backlight_ops kb3886bl_ops = {
38208+static const struct backlight_ops kb3886bl_ops = {
38209 .get_brightness = kb3886bl_get_intensity,
38210 .update_status = kb3886bl_send_intensity,
38211 };
38212diff -urNp linux-2.6.32.46/drivers/video/backlight/locomolcd.c linux-2.6.32.46/drivers/video/backlight/locomolcd.c
38213--- linux-2.6.32.46/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38214+++ linux-2.6.32.46/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38215@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38216 return current_intensity;
38217 }
38218
38219-static struct backlight_ops locomobl_data = {
38220+static const struct backlight_ops locomobl_data = {
38221 .get_brightness = locomolcd_get_intensity,
38222 .update_status = locomolcd_set_intensity,
38223 };
38224diff -urNp linux-2.6.32.46/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.46/drivers/video/backlight/mbp_nvidia_bl.c
38225--- linux-2.6.32.46/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38226+++ linux-2.6.32.46/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38227@@ -33,7 +33,7 @@ struct dmi_match_data {
38228 unsigned long iostart;
38229 unsigned long iolen;
38230 /* Backlight operations structure. */
38231- struct backlight_ops backlight_ops;
38232+ const struct backlight_ops backlight_ops;
38233 };
38234
38235 /* Module parameters. */
38236diff -urNp linux-2.6.32.46/drivers/video/backlight/omap1_bl.c linux-2.6.32.46/drivers/video/backlight/omap1_bl.c
38237--- linux-2.6.32.46/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38238+++ linux-2.6.32.46/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38239@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38240 return bl->current_intensity;
38241 }
38242
38243-static struct backlight_ops omapbl_ops = {
38244+static const struct backlight_ops omapbl_ops = {
38245 .get_brightness = omapbl_get_intensity,
38246 .update_status = omapbl_update_status,
38247 };
38248diff -urNp linux-2.6.32.46/drivers/video/backlight/progear_bl.c linux-2.6.32.46/drivers/video/backlight/progear_bl.c
38249--- linux-2.6.32.46/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38250+++ linux-2.6.32.46/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38251@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38252 return intensity - HW_LEVEL_MIN;
38253 }
38254
38255-static struct backlight_ops progearbl_ops = {
38256+static const struct backlight_ops progearbl_ops = {
38257 .get_brightness = progearbl_get_intensity,
38258 .update_status = progearbl_set_intensity,
38259 };
38260diff -urNp linux-2.6.32.46/drivers/video/backlight/pwm_bl.c linux-2.6.32.46/drivers/video/backlight/pwm_bl.c
38261--- linux-2.6.32.46/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38262+++ linux-2.6.32.46/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38263@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38264 return bl->props.brightness;
38265 }
38266
38267-static struct backlight_ops pwm_backlight_ops = {
38268+static const struct backlight_ops pwm_backlight_ops = {
38269 .update_status = pwm_backlight_update_status,
38270 .get_brightness = pwm_backlight_get_brightness,
38271 };
38272diff -urNp linux-2.6.32.46/drivers/video/backlight/tosa_bl.c linux-2.6.32.46/drivers/video/backlight/tosa_bl.c
38273--- linux-2.6.32.46/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38274+++ linux-2.6.32.46/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38275@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38276 return props->brightness;
38277 }
38278
38279-static struct backlight_ops bl_ops = {
38280+static const struct backlight_ops bl_ops = {
38281 .get_brightness = tosa_bl_get_brightness,
38282 .update_status = tosa_bl_update_status,
38283 };
38284diff -urNp linux-2.6.32.46/drivers/video/backlight/wm831x_bl.c linux-2.6.32.46/drivers/video/backlight/wm831x_bl.c
38285--- linux-2.6.32.46/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38286+++ linux-2.6.32.46/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38287@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38288 return data->current_brightness;
38289 }
38290
38291-static struct backlight_ops wm831x_backlight_ops = {
38292+static const struct backlight_ops wm831x_backlight_ops = {
38293 .options = BL_CORE_SUSPENDRESUME,
38294 .update_status = wm831x_backlight_update_status,
38295 .get_brightness = wm831x_backlight_get_brightness,
38296diff -urNp linux-2.6.32.46/drivers/video/bf54x-lq043fb.c linux-2.6.32.46/drivers/video/bf54x-lq043fb.c
38297--- linux-2.6.32.46/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38298+++ linux-2.6.32.46/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38299@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38300 return 0;
38301 }
38302
38303-static struct backlight_ops bfin_lq043fb_bl_ops = {
38304+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38305 .get_brightness = bl_get_brightness,
38306 };
38307
38308diff -urNp linux-2.6.32.46/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.46/drivers/video/bfin-t350mcqb-fb.c
38309--- linux-2.6.32.46/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38310+++ linux-2.6.32.46/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38311@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38312 return 0;
38313 }
38314
38315-static struct backlight_ops bfin_lq043fb_bl_ops = {
38316+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38317 .get_brightness = bl_get_brightness,
38318 };
38319
38320diff -urNp linux-2.6.32.46/drivers/video/fbcmap.c linux-2.6.32.46/drivers/video/fbcmap.c
38321--- linux-2.6.32.46/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38322+++ linux-2.6.32.46/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38323@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38324 rc = -ENODEV;
38325 goto out;
38326 }
38327- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38328- !info->fbops->fb_setcmap)) {
38329+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38330 rc = -EINVAL;
38331 goto out1;
38332 }
38333diff -urNp linux-2.6.32.46/drivers/video/fbmem.c linux-2.6.32.46/drivers/video/fbmem.c
38334--- linux-2.6.32.46/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38335+++ linux-2.6.32.46/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38336@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38337 image->dx += image->width + 8;
38338 }
38339 } else if (rotate == FB_ROTATE_UD) {
38340- for (x = 0; x < num && image->dx >= 0; x++) {
38341+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38342 info->fbops->fb_imageblit(info, image);
38343 image->dx -= image->width + 8;
38344 }
38345@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38346 image->dy += image->height + 8;
38347 }
38348 } else if (rotate == FB_ROTATE_CCW) {
38349- for (x = 0; x < num && image->dy >= 0; x++) {
38350+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38351 info->fbops->fb_imageblit(info, image);
38352 image->dy -= image->height + 8;
38353 }
38354@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38355 int flags = info->flags;
38356 int ret = 0;
38357
38358+ pax_track_stack();
38359+
38360 if (var->activate & FB_ACTIVATE_INV_MODE) {
38361 struct fb_videomode mode1, mode2;
38362
38363@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38364 void __user *argp = (void __user *)arg;
38365 long ret = 0;
38366
38367+ pax_track_stack();
38368+
38369 switch (cmd) {
38370 case FBIOGET_VSCREENINFO:
38371 if (!lock_fb_info(info))
38372@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38373 return -EFAULT;
38374 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38375 return -EINVAL;
38376- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38377+ if (con2fb.framebuffer >= FB_MAX)
38378 return -EINVAL;
38379 if (!registered_fb[con2fb.framebuffer])
38380 request_module("fb%d", con2fb.framebuffer);
38381diff -urNp linux-2.6.32.46/drivers/video/i810/i810_accel.c linux-2.6.32.46/drivers/video/i810/i810_accel.c
38382--- linux-2.6.32.46/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38383+++ linux-2.6.32.46/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38384@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38385 }
38386 }
38387 printk("ringbuffer lockup!!!\n");
38388+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38389 i810_report_error(mmio);
38390 par->dev_flags |= LOCKUP;
38391 info->pixmap.scan_align = 1;
38392diff -urNp linux-2.6.32.46/drivers/video/nvidia/nv_backlight.c linux-2.6.32.46/drivers/video/nvidia/nv_backlight.c
38393--- linux-2.6.32.46/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38394+++ linux-2.6.32.46/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38395@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38396 return bd->props.brightness;
38397 }
38398
38399-static struct backlight_ops nvidia_bl_ops = {
38400+static const struct backlight_ops nvidia_bl_ops = {
38401 .get_brightness = nvidia_bl_get_brightness,
38402 .update_status = nvidia_bl_update_status,
38403 };
38404diff -urNp linux-2.6.32.46/drivers/video/riva/fbdev.c linux-2.6.32.46/drivers/video/riva/fbdev.c
38405--- linux-2.6.32.46/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38406+++ linux-2.6.32.46/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38407@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38408 return bd->props.brightness;
38409 }
38410
38411-static struct backlight_ops riva_bl_ops = {
38412+static const struct backlight_ops riva_bl_ops = {
38413 .get_brightness = riva_bl_get_brightness,
38414 .update_status = riva_bl_update_status,
38415 };
38416diff -urNp linux-2.6.32.46/drivers/video/uvesafb.c linux-2.6.32.46/drivers/video/uvesafb.c
38417--- linux-2.6.32.46/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38418+++ linux-2.6.32.46/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38419@@ -18,6 +18,7 @@
38420 #include <linux/fb.h>
38421 #include <linux/io.h>
38422 #include <linux/mutex.h>
38423+#include <linux/moduleloader.h>
38424 #include <video/edid.h>
38425 #include <video/uvesafb.h>
38426 #ifdef CONFIG_X86
38427@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38428 NULL,
38429 };
38430
38431- return call_usermodehelper(v86d_path, argv, envp, 1);
38432+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38433 }
38434
38435 /*
38436@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38437 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38438 par->pmi_setpal = par->ypan = 0;
38439 } else {
38440+
38441+#ifdef CONFIG_PAX_KERNEXEC
38442+#ifdef CONFIG_MODULES
38443+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38444+#endif
38445+ if (!par->pmi_code) {
38446+ par->pmi_setpal = par->ypan = 0;
38447+ return 0;
38448+ }
38449+#endif
38450+
38451 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38452 + task->t.regs.edi);
38453+
38454+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38455+ pax_open_kernel();
38456+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38457+ pax_close_kernel();
38458+
38459+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38460+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38461+#else
38462 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38463 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38464+#endif
38465+
38466 printk(KERN_INFO "uvesafb: protected mode interface info at "
38467 "%04x:%04x\n",
38468 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38469@@ -1799,6 +1822,11 @@ out:
38470 if (par->vbe_modes)
38471 kfree(par->vbe_modes);
38472
38473+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38474+ if (par->pmi_code)
38475+ module_free_exec(NULL, par->pmi_code);
38476+#endif
38477+
38478 framebuffer_release(info);
38479 return err;
38480 }
38481@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38482 kfree(par->vbe_state_orig);
38483 if (par->vbe_state_saved)
38484 kfree(par->vbe_state_saved);
38485+
38486+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38487+ if (par->pmi_code)
38488+ module_free_exec(NULL, par->pmi_code);
38489+#endif
38490+
38491 }
38492
38493 framebuffer_release(info);
38494diff -urNp linux-2.6.32.46/drivers/video/vesafb.c linux-2.6.32.46/drivers/video/vesafb.c
38495--- linux-2.6.32.46/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38496+++ linux-2.6.32.46/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
38497@@ -9,6 +9,7 @@
38498 */
38499
38500 #include <linux/module.h>
38501+#include <linux/moduleloader.h>
38502 #include <linux/kernel.h>
38503 #include <linux/errno.h>
38504 #include <linux/string.h>
38505@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
38506 static int vram_total __initdata; /* Set total amount of memory */
38507 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
38508 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
38509-static void (*pmi_start)(void) __read_mostly;
38510-static void (*pmi_pal) (void) __read_mostly;
38511+static void (*pmi_start)(void) __read_only;
38512+static void (*pmi_pal) (void) __read_only;
38513 static int depth __read_mostly;
38514 static int vga_compat __read_mostly;
38515 /* --------------------------------------------------------------------- */
38516@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
38517 unsigned int size_vmode;
38518 unsigned int size_remap;
38519 unsigned int size_total;
38520+ void *pmi_code = NULL;
38521
38522 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
38523 return -ENODEV;
38524@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
38525 size_remap = size_total;
38526 vesafb_fix.smem_len = size_remap;
38527
38528-#ifndef __i386__
38529- screen_info.vesapm_seg = 0;
38530-#endif
38531-
38532 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
38533 printk(KERN_WARNING
38534 "vesafb: cannot reserve video memory at 0x%lx\n",
38535@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
38536 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
38537 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
38538
38539+#ifdef __i386__
38540+
38541+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38542+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
38543+ if (!pmi_code)
38544+#elif !defined(CONFIG_PAX_KERNEXEC)
38545+ if (0)
38546+#endif
38547+
38548+#endif
38549+ screen_info.vesapm_seg = 0;
38550+
38551 if (screen_info.vesapm_seg) {
38552- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
38553- screen_info.vesapm_seg,screen_info.vesapm_off);
38554+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
38555+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
38556 }
38557
38558 if (screen_info.vesapm_seg < 0xc000)
38559@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
38560
38561 if (ypan || pmi_setpal) {
38562 unsigned short *pmi_base;
38563+
38564 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
38565- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
38566- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
38567+
38568+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38569+ pax_open_kernel();
38570+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
38571+#else
38572+ pmi_code = pmi_base;
38573+#endif
38574+
38575+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
38576+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
38577+
38578+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38579+ pmi_start = ktva_ktla(pmi_start);
38580+ pmi_pal = ktva_ktla(pmi_pal);
38581+ pax_close_kernel();
38582+#endif
38583+
38584 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
38585 if (pmi_base[3]) {
38586 printk(KERN_INFO "vesafb: pmi: ports = ");
38587@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
38588 info->node, info->fix.id);
38589 return 0;
38590 err:
38591+
38592+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38593+ module_free_exec(NULL, pmi_code);
38594+#endif
38595+
38596 if (info->screen_base)
38597 iounmap(info->screen_base);
38598 framebuffer_release(info);
38599diff -urNp linux-2.6.32.46/drivers/xen/sys-hypervisor.c linux-2.6.32.46/drivers/xen/sys-hypervisor.c
38600--- linux-2.6.32.46/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
38601+++ linux-2.6.32.46/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
38602@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
38603 return 0;
38604 }
38605
38606-static struct sysfs_ops hyp_sysfs_ops = {
38607+static const struct sysfs_ops hyp_sysfs_ops = {
38608 .show = hyp_sysfs_show,
38609 .store = hyp_sysfs_store,
38610 };
38611diff -urNp linux-2.6.32.46/fs/9p/vfs_inode.c linux-2.6.32.46/fs/9p/vfs_inode.c
38612--- linux-2.6.32.46/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
38613+++ linux-2.6.32.46/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
38614@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
38615 static void
38616 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38617 {
38618- char *s = nd_get_link(nd);
38619+ const char *s = nd_get_link(nd);
38620
38621 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
38622 IS_ERR(s) ? "<error>" : s);
38623diff -urNp linux-2.6.32.46/fs/aio.c linux-2.6.32.46/fs/aio.c
38624--- linux-2.6.32.46/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
38625+++ linux-2.6.32.46/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
38626@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
38627 size += sizeof(struct io_event) * nr_events;
38628 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
38629
38630- if (nr_pages < 0)
38631+ if (nr_pages <= 0)
38632 return -EINVAL;
38633
38634 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
38635@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
38636 struct aio_timeout to;
38637 int retry = 0;
38638
38639+ pax_track_stack();
38640+
38641 /* needed to zero any padding within an entry (there shouldn't be
38642 * any, but C is fun!
38643 */
38644@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
38645 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
38646 {
38647 ssize_t ret;
38648+ struct iovec iovstack;
38649
38650 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
38651 kiocb->ki_nbytes, 1,
38652- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
38653+ &iovstack, &kiocb->ki_iovec);
38654 if (ret < 0)
38655 goto out;
38656
38657+ if (kiocb->ki_iovec == &iovstack) {
38658+ kiocb->ki_inline_vec = iovstack;
38659+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
38660+ }
38661 kiocb->ki_nr_segs = kiocb->ki_nbytes;
38662 kiocb->ki_cur_seg = 0;
38663 /* ki_nbytes/left now reflect bytes instead of segs */
38664diff -urNp linux-2.6.32.46/fs/attr.c linux-2.6.32.46/fs/attr.c
38665--- linux-2.6.32.46/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
38666+++ linux-2.6.32.46/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
38667@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
38668 unsigned long limit;
38669
38670 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
38671+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
38672 if (limit != RLIM_INFINITY && offset > limit)
38673 goto out_sig;
38674 if (offset > inode->i_sb->s_maxbytes)
38675diff -urNp linux-2.6.32.46/fs/autofs/root.c linux-2.6.32.46/fs/autofs/root.c
38676--- linux-2.6.32.46/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
38677+++ linux-2.6.32.46/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
38678@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
38679 set_bit(n,sbi->symlink_bitmap);
38680 sl = &sbi->symlink[n];
38681 sl->len = strlen(symname);
38682- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
38683+ slsize = sl->len+1;
38684+ sl->data = kmalloc(slsize, GFP_KERNEL);
38685 if (!sl->data) {
38686 clear_bit(n,sbi->symlink_bitmap);
38687 unlock_kernel();
38688diff -urNp linux-2.6.32.46/fs/autofs4/symlink.c linux-2.6.32.46/fs/autofs4/symlink.c
38689--- linux-2.6.32.46/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
38690+++ linux-2.6.32.46/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
38691@@ -15,7 +15,7 @@
38692 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
38693 {
38694 struct autofs_info *ino = autofs4_dentry_ino(dentry);
38695- nd_set_link(nd, (char *)ino->u.symlink);
38696+ nd_set_link(nd, ino->u.symlink);
38697 return NULL;
38698 }
38699
38700diff -urNp linux-2.6.32.46/fs/befs/linuxvfs.c linux-2.6.32.46/fs/befs/linuxvfs.c
38701--- linux-2.6.32.46/fs/befs/linuxvfs.c 2011-08-29 22:24:44.000000000 -0400
38702+++ linux-2.6.32.46/fs/befs/linuxvfs.c 2011-08-29 22:25:07.000000000 -0400
38703@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
38704 {
38705 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
38706 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
38707- char *link = nd_get_link(nd);
38708+ const char *link = nd_get_link(nd);
38709 if (!IS_ERR(link))
38710 kfree(link);
38711 }
38712diff -urNp linux-2.6.32.46/fs/binfmt_aout.c linux-2.6.32.46/fs/binfmt_aout.c
38713--- linux-2.6.32.46/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
38714+++ linux-2.6.32.46/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
38715@@ -16,6 +16,7 @@
38716 #include <linux/string.h>
38717 #include <linux/fs.h>
38718 #include <linux/file.h>
38719+#include <linux/security.h>
38720 #include <linux/stat.h>
38721 #include <linux/fcntl.h>
38722 #include <linux/ptrace.h>
38723@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
38724 #endif
38725 # define START_STACK(u) (u.start_stack)
38726
38727+ memset(&dump, 0, sizeof(dump));
38728+
38729 fs = get_fs();
38730 set_fs(KERNEL_DS);
38731 has_dumped = 1;
38732@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
38733
38734 /* If the size of the dump file exceeds the rlimit, then see what would happen
38735 if we wrote the stack, but not the data area. */
38736+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
38737 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
38738 dump.u_dsize = 0;
38739
38740 /* Make sure we have enough room to write the stack and data areas. */
38741+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
38742 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
38743 dump.u_ssize = 0;
38744
38745@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
38746 dump_size = dump.u_ssize << PAGE_SHIFT;
38747 DUMP_WRITE(dump_start,dump_size);
38748 }
38749-/* Finally dump the task struct. Not be used by gdb, but could be useful */
38750- set_fs(KERNEL_DS);
38751- DUMP_WRITE(current,sizeof(*current));
38752+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
38753 end_coredump:
38754 set_fs(fs);
38755 return has_dumped;
38756@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
38757 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
38758 if (rlim >= RLIM_INFINITY)
38759 rlim = ~0;
38760+
38761+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
38762 if (ex.a_data + ex.a_bss > rlim)
38763 return -ENOMEM;
38764
38765@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
38766 install_exec_creds(bprm);
38767 current->flags &= ~PF_FORKNOEXEC;
38768
38769+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
38770+ current->mm->pax_flags = 0UL;
38771+#endif
38772+
38773+#ifdef CONFIG_PAX_PAGEEXEC
38774+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
38775+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
38776+
38777+#ifdef CONFIG_PAX_EMUTRAMP
38778+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
38779+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
38780+#endif
38781+
38782+#ifdef CONFIG_PAX_MPROTECT
38783+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
38784+ current->mm->pax_flags |= MF_PAX_MPROTECT;
38785+#endif
38786+
38787+ }
38788+#endif
38789+
38790 if (N_MAGIC(ex) == OMAGIC) {
38791 unsigned long text_addr, map_size;
38792 loff_t pos;
38793@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
38794
38795 down_write(&current->mm->mmap_sem);
38796 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
38797- PROT_READ | PROT_WRITE | PROT_EXEC,
38798+ PROT_READ | PROT_WRITE,
38799 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
38800 fd_offset + ex.a_text);
38801 up_write(&current->mm->mmap_sem);
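
The final a.out hunk above maps the data segment without PROT_EXEC. A userspace sketch of the same write-xor-execute idea using plain mmap(), purely illustrative:

/* sketch: the data region is mapped read/write only, never executable,
 * mirroring the PROT_EXEC removal in the patched do_mmap() call. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *data = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (data == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(data, 0, len);
	printf("data segment at %p, rw but not executable\n", data);
	munmap(data, len);
	return 0;
}
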
38802diff -urNp linux-2.6.32.46/fs/binfmt_elf.c linux-2.6.32.46/fs/binfmt_elf.c
38803--- linux-2.6.32.46/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38804+++ linux-2.6.32.46/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
38805@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
38806 #define elf_core_dump NULL
38807 #endif
38808
38809+#ifdef CONFIG_PAX_MPROTECT
38810+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
38811+#endif
38812+
38813 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
38814 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
38815 #else
38816@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
38817 .load_binary = load_elf_binary,
38818 .load_shlib = load_elf_library,
38819 .core_dump = elf_core_dump,
38820+
38821+#ifdef CONFIG_PAX_MPROTECT
38822+ .handle_mprotect= elf_handle_mprotect,
38823+#endif
38824+
38825 .min_coredump = ELF_EXEC_PAGESIZE,
38826 .hasvdso = 1
38827 };
38828@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
38829
38830 static int set_brk(unsigned long start, unsigned long end)
38831 {
38832+ unsigned long e = end;
38833+
38834 start = ELF_PAGEALIGN(start);
38835 end = ELF_PAGEALIGN(end);
38836 if (end > start) {
38837@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
38838 if (BAD_ADDR(addr))
38839 return addr;
38840 }
38841- current->mm->start_brk = current->mm->brk = end;
38842+ current->mm->start_brk = current->mm->brk = e;
38843 return 0;
38844 }
38845
38846@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
38847 elf_addr_t __user *u_rand_bytes;
38848 const char *k_platform = ELF_PLATFORM;
38849 const char *k_base_platform = ELF_BASE_PLATFORM;
38850- unsigned char k_rand_bytes[16];
38851+ u32 k_rand_bytes[4];
38852 int items;
38853 elf_addr_t *elf_info;
38854 int ei_index = 0;
38855 const struct cred *cred = current_cred();
38856 struct vm_area_struct *vma;
38857+ unsigned long saved_auxv[AT_VECTOR_SIZE];
38858+
38859+ pax_track_stack();
38860
38861 /*
38862 * In some cases (e.g. Hyper-Threading), we want to avoid L1
38863@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
38864 * Generate 16 random bytes for userspace PRNG seeding.
38865 */
38866 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
38867- u_rand_bytes = (elf_addr_t __user *)
38868- STACK_ALLOC(p, sizeof(k_rand_bytes));
38869+ srandom32(k_rand_bytes[0] ^ random32());
38870+ srandom32(k_rand_bytes[1] ^ random32());
38871+ srandom32(k_rand_bytes[2] ^ random32());
38872+ srandom32(k_rand_bytes[3] ^ random32());
38873+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
38874+ u_rand_bytes = (elf_addr_t __user *) p;
38875 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
38876 return -EFAULT;
38877
38878@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
38879 return -EFAULT;
38880 current->mm->env_end = p;
38881
38882+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
38883+
38884 /* Put the elf_info on the stack in the right place. */
38885 sp = (elf_addr_t __user *)envp + 1;
38886- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
38887+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
38888 return -EFAULT;
38889 return 0;
38890 }
38891@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
38892 {
38893 struct elf_phdr *elf_phdata;
38894 struct elf_phdr *eppnt;
38895- unsigned long load_addr = 0;
38896+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
38897 int load_addr_set = 0;
38898 unsigned long last_bss = 0, elf_bss = 0;
38899- unsigned long error = ~0UL;
38900+ unsigned long error = -EINVAL;
38901 unsigned long total_size;
38902 int retval, i, size;
38903
38904@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
38905 goto out_close;
38906 }
38907
38908+#ifdef CONFIG_PAX_SEGMEXEC
38909+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
38910+ pax_task_size = SEGMEXEC_TASK_SIZE;
38911+#endif
38912+
38913 eppnt = elf_phdata;
38914 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
38915 if (eppnt->p_type == PT_LOAD) {
38916@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
38917 k = load_addr + eppnt->p_vaddr;
38918 if (BAD_ADDR(k) ||
38919 eppnt->p_filesz > eppnt->p_memsz ||
38920- eppnt->p_memsz > TASK_SIZE ||
38921- TASK_SIZE - eppnt->p_memsz < k) {
38922+ eppnt->p_memsz > pax_task_size ||
38923+ pax_task_size - eppnt->p_memsz < k) {
38924 error = -ENOMEM;
38925 goto out_close;
38926 }
38927@@ -532,6 +557,194 @@ out:
38928 return error;
38929 }
38930
38931+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
38932+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
38933+{
38934+ unsigned long pax_flags = 0UL;
38935+
38936+#ifdef CONFIG_PAX_PAGEEXEC
38937+ if (elf_phdata->p_flags & PF_PAGEEXEC)
38938+ pax_flags |= MF_PAX_PAGEEXEC;
38939+#endif
38940+
38941+#ifdef CONFIG_PAX_SEGMEXEC
38942+ if (elf_phdata->p_flags & PF_SEGMEXEC)
38943+ pax_flags |= MF_PAX_SEGMEXEC;
38944+#endif
38945+
38946+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38947+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38948+ if (nx_enabled)
38949+ pax_flags &= ~MF_PAX_SEGMEXEC;
38950+ else
38951+ pax_flags &= ~MF_PAX_PAGEEXEC;
38952+ }
38953+#endif
38954+
38955+#ifdef CONFIG_PAX_EMUTRAMP
38956+ if (elf_phdata->p_flags & PF_EMUTRAMP)
38957+ pax_flags |= MF_PAX_EMUTRAMP;
38958+#endif
38959+
38960+#ifdef CONFIG_PAX_MPROTECT
38961+ if (elf_phdata->p_flags & PF_MPROTECT)
38962+ pax_flags |= MF_PAX_MPROTECT;
38963+#endif
38964+
38965+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
38966+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
38967+ pax_flags |= MF_PAX_RANDMMAP;
38968+#endif
38969+
38970+ return pax_flags;
38971+}
38972+#endif
38973+
38974+#ifdef CONFIG_PAX_PT_PAX_FLAGS
38975+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
38976+{
38977+ unsigned long pax_flags = 0UL;
38978+
38979+#ifdef CONFIG_PAX_PAGEEXEC
38980+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
38981+ pax_flags |= MF_PAX_PAGEEXEC;
38982+#endif
38983+
38984+#ifdef CONFIG_PAX_SEGMEXEC
38985+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
38986+ pax_flags |= MF_PAX_SEGMEXEC;
38987+#endif
38988+
38989+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
38990+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38991+ if (nx_enabled)
38992+ pax_flags &= ~MF_PAX_SEGMEXEC;
38993+ else
38994+ pax_flags &= ~MF_PAX_PAGEEXEC;
38995+ }
38996+#endif
38997+
38998+#ifdef CONFIG_PAX_EMUTRAMP
38999+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39000+ pax_flags |= MF_PAX_EMUTRAMP;
39001+#endif
39002+
39003+#ifdef CONFIG_PAX_MPROTECT
39004+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39005+ pax_flags |= MF_PAX_MPROTECT;
39006+#endif
39007+
39008+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39009+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39010+ pax_flags |= MF_PAX_RANDMMAP;
39011+#endif
39012+
39013+ return pax_flags;
39014+}
39015+#endif
39016+
39017+#ifdef CONFIG_PAX_EI_PAX
39018+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39019+{
39020+ unsigned long pax_flags = 0UL;
39021+
39022+#ifdef CONFIG_PAX_PAGEEXEC
39023+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39024+ pax_flags |= MF_PAX_PAGEEXEC;
39025+#endif
39026+
39027+#ifdef CONFIG_PAX_SEGMEXEC
39028+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39029+ pax_flags |= MF_PAX_SEGMEXEC;
39030+#endif
39031+
39032+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39033+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39034+ if (nx_enabled)
39035+ pax_flags &= ~MF_PAX_SEGMEXEC;
39036+ else
39037+ pax_flags &= ~MF_PAX_PAGEEXEC;
39038+ }
39039+#endif
39040+
39041+#ifdef CONFIG_PAX_EMUTRAMP
39042+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39043+ pax_flags |= MF_PAX_EMUTRAMP;
39044+#endif
39045+
39046+#ifdef CONFIG_PAX_MPROTECT
39047+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39048+ pax_flags |= MF_PAX_MPROTECT;
39049+#endif
39050+
39051+#ifdef CONFIG_PAX_ASLR
39052+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39053+ pax_flags |= MF_PAX_RANDMMAP;
39054+#endif
39055+
39056+ return pax_flags;
39057+}
39058+#endif
39059+
39060+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39061+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39062+{
39063+ unsigned long pax_flags = 0UL;
39064+
39065+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39066+ unsigned long i;
39067+ int found_flags = 0;
39068+#endif
39069+
39070+#ifdef CONFIG_PAX_EI_PAX
39071+ pax_flags = pax_parse_ei_pax(elf_ex);
39072+#endif
39073+
39074+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39075+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39076+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39077+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39078+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39079+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39080+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39081+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39082+ return -EINVAL;
39083+
39084+#ifdef CONFIG_PAX_SOFTMODE
39085+ if (pax_softmode)
39086+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39087+ else
39088+#endif
39089+
39090+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39091+ found_flags = 1;
39092+ break;
39093+ }
39094+#endif
39095+
39096+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39097+ if (found_flags == 0) {
39098+ struct elf_phdr phdr;
39099+ memset(&phdr, 0, sizeof(phdr));
39100+ phdr.p_flags = PF_NOEMUTRAMP;
39101+#ifdef CONFIG_PAX_SOFTMODE
39102+ if (pax_softmode)
39103+ pax_flags = pax_parse_softmode(&phdr);
39104+ else
39105+#endif
39106+ pax_flags = pax_parse_hardmode(&phdr);
39107+ }
39108+#endif
39109+
39110+
39111+ if (0 > pax_check_flags(&pax_flags))
39112+ return -EINVAL;
39113+
39114+ current->mm->pax_flags = pax_flags;
39115+ return 0;
39116+}
39117+#endif
39118+
39119 /*
39120 * These are the functions used to load ELF style executables and shared
39121 * libraries. There is no binary dependent code anywhere else.
39122@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39123 {
39124 unsigned int random_variable = 0;
39125
39126+#ifdef CONFIG_PAX_RANDUSTACK
39127+ if (randomize_va_space)
39128+ return stack_top - current->mm->delta_stack;
39129+#endif
39130+
39131 if ((current->flags & PF_RANDOMIZE) &&
39132 !(current->personality & ADDR_NO_RANDOMIZE)) {
39133 random_variable = get_random_int() & STACK_RND_MASK;
39134@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39135 unsigned long load_addr = 0, load_bias = 0;
39136 int load_addr_set = 0;
39137 char * elf_interpreter = NULL;
39138- unsigned long error;
39139+ unsigned long error = 0;
39140 struct elf_phdr *elf_ppnt, *elf_phdata;
39141 unsigned long elf_bss, elf_brk;
39142 int retval, i;
39143@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39144 unsigned long start_code, end_code, start_data, end_data;
39145 unsigned long reloc_func_desc = 0;
39146 int executable_stack = EXSTACK_DEFAULT;
39147- unsigned long def_flags = 0;
39148 struct {
39149 struct elfhdr elf_ex;
39150 struct elfhdr interp_elf_ex;
39151 } *loc;
39152+ unsigned long pax_task_size = TASK_SIZE;
39153
39154 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39155 if (!loc) {
39156@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39157
39158 /* OK, This is the point of no return */
39159 current->flags &= ~PF_FORKNOEXEC;
39160- current->mm->def_flags = def_flags;
39161+
39162+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39163+ current->mm->pax_flags = 0UL;
39164+#endif
39165+
39166+#ifdef CONFIG_PAX_DLRESOLVE
39167+ current->mm->call_dl_resolve = 0UL;
39168+#endif
39169+
39170+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39171+ current->mm->call_syscall = 0UL;
39172+#endif
39173+
39174+#ifdef CONFIG_PAX_ASLR
39175+ current->mm->delta_mmap = 0UL;
39176+ current->mm->delta_stack = 0UL;
39177+#endif
39178+
39179+ current->mm->def_flags = 0;
39180+
39181+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39182+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39183+ send_sig(SIGKILL, current, 0);
39184+ goto out_free_dentry;
39185+ }
39186+#endif
39187+
39188+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39189+ pax_set_initial_flags(bprm);
39190+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39191+ if (pax_set_initial_flags_func)
39192+ (pax_set_initial_flags_func)(bprm);
39193+#endif
39194+
39195+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39196+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39197+ current->mm->context.user_cs_limit = PAGE_SIZE;
39198+ current->mm->def_flags |= VM_PAGEEXEC;
39199+ }
39200+#endif
39201+
39202+#ifdef CONFIG_PAX_SEGMEXEC
39203+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39204+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39205+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39206+ pax_task_size = SEGMEXEC_TASK_SIZE;
39207+ }
39208+#endif
39209+
39210+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39211+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39212+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39213+ put_cpu();
39214+ }
39215+#endif
39216
39217 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39218 may depend on the personality. */
39219 SET_PERSONALITY(loc->elf_ex);
39220+
39221+#ifdef CONFIG_PAX_ASLR
39222+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39223+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39224+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39225+ }
39226+#endif
39227+
39228+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39229+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39230+ executable_stack = EXSTACK_DISABLE_X;
39231+ current->personality &= ~READ_IMPLIES_EXEC;
39232+ } else
39233+#endif
39234+
39235 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39236 current->personality |= READ_IMPLIES_EXEC;
39237
39238@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39239 #else
39240 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39241 #endif
39242+
39243+#ifdef CONFIG_PAX_RANDMMAP
39244+ /* PaX: randomize base address at the default exe base if requested */
39245+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39246+#ifdef CONFIG_SPARC64
39247+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39248+#else
39249+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39250+#endif
39251+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39252+ elf_flags |= MAP_FIXED;
39253+ }
39254+#endif
39255+
39256 }
39257
39258 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39259@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39260 * allowed task size. Note that p_filesz must always be
39261 * <= p_memsz so it is only necessary to check p_memsz.
39262 */
39263- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39264- elf_ppnt->p_memsz > TASK_SIZE ||
39265- TASK_SIZE - elf_ppnt->p_memsz < k) {
39266+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39267+ elf_ppnt->p_memsz > pax_task_size ||
39268+ pax_task_size - elf_ppnt->p_memsz < k) {
39269 /* set_brk can never work. Avoid overflows. */
39270 send_sig(SIGKILL, current, 0);
39271 retval = -EINVAL;
39272@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39273 start_data += load_bias;
39274 end_data += load_bias;
39275
39276+#ifdef CONFIG_PAX_RANDMMAP
39277+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39278+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39279+#endif
39280+
39281 /* Calling set_brk effectively mmaps the pages that we need
39282 * for the bss and break sections. We must do this before
39283 * mapping in the interpreter, to make sure it doesn't wind
39284@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39285 goto out_free_dentry;
39286 }
39287 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39288- send_sig(SIGSEGV, current, 0);
39289- retval = -EFAULT; /* Nobody gets to see this, but.. */
39290- goto out_free_dentry;
39291+ /*
39292+ * This bss-zeroing can fail if the ELF
39293+ * file specifies odd protections. So
39294+ * we don't check the return value
39295+ */
39296 }
39297
39298 if (elf_interpreter) {
39299@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39300 unsigned long n = off;
39301 if (n > PAGE_SIZE)
39302 n = PAGE_SIZE;
39303- if (!dump_write(file, buf, n))
39304+ if (!dump_write(file, buf, n)) {
39305+ free_page((unsigned long)buf);
39306 return 0;
39307+ }
39308 off -= n;
39309 }
39310 free_page((unsigned long)buf);
39311@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39312 * Decide what to dump of a segment, part, all or none.
39313 */
39314 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39315- unsigned long mm_flags)
39316+ unsigned long mm_flags, long signr)
39317 {
39318 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39319
39320@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39321 if (vma->vm_file == NULL)
39322 return 0;
39323
39324- if (FILTER(MAPPED_PRIVATE))
39325+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39326 goto whole;
39327
39328 /*
39329@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39330 #undef DUMP_WRITE
39331
39332 #define DUMP_WRITE(addr, nr) \
39333+ do { \
39334+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39335 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39336- goto end_coredump;
39337+ goto end_coredump; \
39338+ } while (0);
39339
39340 static void fill_elf_header(struct elfhdr *elf, int segs,
39341 u16 machine, u32 flags, u8 osabi)
39342@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39343 {
39344 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39345 int i = 0;
39346- do
39347+ do {
39348 i += 2;
39349- while (auxv[i - 2] != AT_NULL);
39350+ } while (auxv[i - 2] != AT_NULL);
39351 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39352 }
39353
39354@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39355 phdr.p_offset = offset;
39356 phdr.p_vaddr = vma->vm_start;
39357 phdr.p_paddr = 0;
39358- phdr.p_filesz = vma_dump_size(vma, mm_flags);
39359+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39360 phdr.p_memsz = vma->vm_end - vma->vm_start;
39361 offset += phdr.p_filesz;
39362 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39363@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39364 unsigned long addr;
39365 unsigned long end;
39366
39367- end = vma->vm_start + vma_dump_size(vma, mm_flags);
39368+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39369
39370 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39371 struct page *page;
39372@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39373 page = get_dump_page(addr);
39374 if (page) {
39375 void *kaddr = kmap(page);
39376+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39377 stop = ((size += PAGE_SIZE) > limit) ||
39378 !dump_write(file, kaddr, PAGE_SIZE);
39379 kunmap(page);
39380@@ -2042,6 +2356,97 @@ out:
39381
39382 #endif /* USE_ELF_CORE_DUMP */
39383
39384+#ifdef CONFIG_PAX_MPROTECT
39385+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39386+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39387+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39388+ *
39389+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39390+ * basis because we want to allow the common case and not the special ones.
39391+ */
39392+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39393+{
39394+ struct elfhdr elf_h;
39395+ struct elf_phdr elf_p;
39396+ unsigned long i;
39397+ unsigned long oldflags;
39398+ bool is_textrel_rw, is_textrel_rx, is_relro;
39399+
39400+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39401+ return;
39402+
39403+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39404+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39405+
39406+#ifdef CONFIG_PAX_ELFRELOCS
39407+ /* possible TEXTREL */
39408+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39409+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39410+#else
39411+ is_textrel_rw = false;
39412+ is_textrel_rx = false;
39413+#endif
39414+
39415+ /* possible RELRO */
39416+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39417+
39418+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39419+ return;
39420+
39421+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39422+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39423+
39424+#ifdef CONFIG_PAX_ETEXECRELOCS
39425+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39426+#else
39427+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39428+#endif
39429+
39430+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39431+ !elf_check_arch(&elf_h) ||
39432+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39433+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39434+ return;
39435+
39436+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39437+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39438+ return;
39439+ switch (elf_p.p_type) {
39440+ case PT_DYNAMIC:
39441+ if (!is_textrel_rw && !is_textrel_rx)
39442+ continue;
39443+ i = 0UL;
39444+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39445+ elf_dyn dyn;
39446+
39447+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39448+ return;
39449+ if (dyn.d_tag == DT_NULL)
39450+ return;
39451+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39452+ gr_log_textrel(vma);
39453+ if (is_textrel_rw)
39454+ vma->vm_flags |= VM_MAYWRITE;
39455+ else
39456+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
39457+ vma->vm_flags &= ~VM_MAYWRITE;
39458+ return;
39459+ }
39460+ i++;
39461+ }
39462+ return;
39463+
39464+ case PT_GNU_RELRO:
39465+ if (!is_relro)
39466+ continue;
39467+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39468+ vma->vm_flags &= ~VM_MAYWRITE;
39469+ return;
39470+ }
39471+ }
39472+}
39473+#endif
39474+
39475 static int __init init_elf_binfmt(void)
39476 {
39477 return register_binfmt(&elf_format);
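
In the binfmt_elf changes above, pax_parse_hardmode() treats each feature as enabled unless the matching PF_NO* bit is set, and pax_parse_elf_flags() rejects a program header that sets both the positive and the negative bit for the same feature. A compilable sketch of that convention, with made-up flag values standing in for the real PT_PAX_FLAGS bits:

/* sketch: "on unless explicitly opted out" flag parsing, rejecting
 * contradictory requests. Flag values here are illustrative only. */
#include <stdio.h>

#define PF_PAGEEXEC	0x1u	/* request the feature */
#define PF_NOPAGEEXEC	0x2u	/* opt out of it       */
#define PF_MPROTECT	0x4u
#define PF_NOMPROTECT	0x8u

#define MF_PAGEEXEC	0x1u
#define MF_MPROTECT	0x2u

static int parse_hardmode(unsigned int p_flags, unsigned int *out)
{
	unsigned int flags = 0;

	/* a feature set together with its own opt-out is a broken header */
	if ((p_flags & PF_PAGEEXEC) && (p_flags & PF_NOPAGEEXEC))
		return -1;
	if ((p_flags & PF_MPROTECT) && (p_flags & PF_NOMPROTECT))
		return -1;

	if (!(p_flags & PF_NOPAGEEXEC))
		flags |= MF_PAGEEXEC;	/* default on */
	if (!(p_flags & PF_NOMPROTECT))
		flags |= MF_MPROTECT;

	*out = flags;
	return 0;
}

int main(void)
{
	unsigned int flags;

	if (parse_hardmode(PF_NOMPROTECT, &flags) == 0)
		printf("flags = %#x (MPROTECT opted out)\n", flags);
	if (parse_hardmode(PF_PAGEEXEC | PF_NOPAGEEXEC, &flags) < 0)
		printf("contradictory header rejected\n");
	return 0;
}

The default-on convention is why an ELF file with no PT_PAX_FLAGS entry still ends up with the protections enabled in the fallback path above.
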
39478diff -urNp linux-2.6.32.46/fs/binfmt_flat.c linux-2.6.32.46/fs/binfmt_flat.c
39479--- linux-2.6.32.46/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39480+++ linux-2.6.32.46/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39481@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39482 realdatastart = (unsigned long) -ENOMEM;
39483 printk("Unable to allocate RAM for process data, errno %d\n",
39484 (int)-realdatastart);
39485+ down_write(&current->mm->mmap_sem);
39486 do_munmap(current->mm, textpos, text_len);
39487+ up_write(&current->mm->mmap_sem);
39488 ret = realdatastart;
39489 goto err;
39490 }
39491@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39492 }
39493 if (IS_ERR_VALUE(result)) {
39494 printk("Unable to read data+bss, errno %d\n", (int)-result);
39495+ down_write(&current->mm->mmap_sem);
39496 do_munmap(current->mm, textpos, text_len);
39497 do_munmap(current->mm, realdatastart, data_len + extra);
39498+ up_write(&current->mm->mmap_sem);
39499 ret = result;
39500 goto err;
39501 }
39502@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
39503 }
39504 if (IS_ERR_VALUE(result)) {
39505 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
39506+ down_write(&current->mm->mmap_sem);
39507 do_munmap(current->mm, textpos, text_len + data_len + extra +
39508 MAX_SHARED_LIBS * sizeof(unsigned long));
39509+ up_write(&current->mm->mmap_sem);
39510 ret = result;
39511 goto err;
39512 }
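
The binfmt_flat hunks above take mmap_sem for writing around the do_munmap() calls on the error paths, since do_munmap() must be called with that lock held. A loose userspace analogue with a pthread rwlock; the names are illustrative, not kernel APIs:

/* sketch: mutation of the mapping state must hold the writer lock even on
 * error/cleanup paths, mirroring down_write(&mm->mmap_sem) around do_munmap(). */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nr_mappings = 2;

static void unmap_one(void)		/* caller must hold map_lock for writing */
{
	nr_mappings--;
}

static void load_failed_cleanup(void)
{
	pthread_rwlock_wrlock(&map_lock);	/* down_write() */
	unmap_one();				/* do_munmap()  */
	pthread_rwlock_unlock(&map_lock);	/* up_write()   */
}

int main(void)
{
	load_failed_cleanup();
	printf("mappings remaining: %d\n", nr_mappings);
	return 0;
}
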
39513diff -urNp linux-2.6.32.46/fs/bio.c linux-2.6.32.46/fs/bio.c
39514--- linux-2.6.32.46/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
39515+++ linux-2.6.32.46/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
39516@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
39517
39518 i = 0;
39519 while (i < bio_slab_nr) {
39520- struct bio_slab *bslab = &bio_slabs[i];
39521+ bslab = &bio_slabs[i];
39522
39523 if (!bslab->slab && entry == -1)
39524 entry = i;
39525@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
39526 const int read = bio_data_dir(bio) == READ;
39527 struct bio_map_data *bmd = bio->bi_private;
39528 int i;
39529- char *p = bmd->sgvecs[0].iov_base;
39530+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
39531
39532 __bio_for_each_segment(bvec, bio, i, 0) {
39533 char *addr = page_address(bvec->bv_page);
39534diff -urNp linux-2.6.32.46/fs/block_dev.c linux-2.6.32.46/fs/block_dev.c
39535--- linux-2.6.32.46/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
39536+++ linux-2.6.32.46/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
39537@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
39538 else if (bdev->bd_contains == bdev)
39539 res = 0; /* is a whole device which isn't held */
39540
39541- else if (bdev->bd_contains->bd_holder == bd_claim)
39542+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
39543 res = 0; /* is a partition of a device that is being partitioned */
39544 else if (bdev->bd_contains->bd_holder != NULL)
39545 res = -EBUSY; /* is a partition of a held device */
39546diff -urNp linux-2.6.32.46/fs/btrfs/ctree.c linux-2.6.32.46/fs/btrfs/ctree.c
39547--- linux-2.6.32.46/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
39548+++ linux-2.6.32.46/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
39549@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
39550 free_extent_buffer(buf);
39551 add_root_to_dirty_list(root);
39552 } else {
39553- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
39554- parent_start = parent->start;
39555- else
39556+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
39557+ if (parent)
39558+ parent_start = parent->start;
39559+ else
39560+ parent_start = 0;
39561+ } else
39562 parent_start = 0;
39563
39564 WARN_ON(trans->transid != btrfs_header_generation(parent));
39565@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
39566
39567 ret = 0;
39568 if (slot == 0) {
39569- struct btrfs_disk_key disk_key;
39570 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
39571 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
39572 }
39573diff -urNp linux-2.6.32.46/fs/btrfs/disk-io.c linux-2.6.32.46/fs/btrfs/disk-io.c
39574--- linux-2.6.32.46/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
39575+++ linux-2.6.32.46/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
39576@@ -39,7 +39,7 @@
39577 #include "tree-log.h"
39578 #include "free-space-cache.h"
39579
39580-static struct extent_io_ops btree_extent_io_ops;
39581+static const struct extent_io_ops btree_extent_io_ops;
39582 static void end_workqueue_fn(struct btrfs_work *work);
39583 static void free_fs_root(struct btrfs_root *root);
39584
39585@@ -2607,7 +2607,7 @@ out:
39586 return 0;
39587 }
39588
39589-static struct extent_io_ops btree_extent_io_ops = {
39590+static const struct extent_io_ops btree_extent_io_ops = {
39591 .write_cache_pages_lock_hook = btree_lock_page_hook,
39592 .readpage_end_io_hook = btree_readpage_end_io_hook,
39593 .submit_bio_hook = btree_submit_bio_hook,
39594diff -urNp linux-2.6.32.46/fs/btrfs/extent_io.h linux-2.6.32.46/fs/btrfs/extent_io.h
39595--- linux-2.6.32.46/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
39596+++ linux-2.6.32.46/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
39597@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
39598 struct bio *bio, int mirror_num,
39599 unsigned long bio_flags);
39600 struct extent_io_ops {
39601- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
39602+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
39603 u64 start, u64 end, int *page_started,
39604 unsigned long *nr_written);
39605- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
39606- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
39607+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
39608+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
39609 extent_submit_bio_hook_t *submit_bio_hook;
39610- int (*merge_bio_hook)(struct page *page, unsigned long offset,
39611+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
39612 size_t size, struct bio *bio,
39613 unsigned long bio_flags);
39614- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
39615- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
39616+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
39617+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
39618 u64 start, u64 end,
39619 struct extent_state *state);
39620- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
39621+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
39622 u64 start, u64 end,
39623 struct extent_state *state);
39624- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39625+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
39626 struct extent_state *state);
39627- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39628+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
39629 struct extent_state *state, int uptodate);
39630- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
39631+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
39632 unsigned long old, unsigned long bits);
39633- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
39634+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
39635 unsigned long bits);
39636- int (*merge_extent_hook)(struct inode *inode,
39637+ int (* const merge_extent_hook)(struct inode *inode,
39638 struct extent_state *new,
39639 struct extent_state *other);
39640- int (*split_extent_hook)(struct inode *inode,
39641+ int (* const split_extent_hook)(struct inode *inode,
39642 struct extent_state *orig, u64 split);
39643- int (*write_cache_pages_lock_hook)(struct page *page);
39644+ int (* const write_cache_pages_lock_hook)(struct page *page);
39645 };
39646
39647 struct extent_io_tree {
39648@@ -88,7 +88,7 @@ struct extent_io_tree {
39649 u64 dirty_bytes;
39650 spinlock_t lock;
39651 spinlock_t buffer_lock;
39652- struct extent_io_ops *ops;
39653+ const struct extent_io_ops *ops;
39654 };
39655
39656 struct extent_state {
39657diff -urNp linux-2.6.32.46/fs/btrfs/extent-tree.c linux-2.6.32.46/fs/btrfs/extent-tree.c
39658--- linux-2.6.32.46/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
39659+++ linux-2.6.32.46/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
39660@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
39661 u64 group_start = group->key.objectid;
39662 new_extents = kmalloc(sizeof(*new_extents),
39663 GFP_NOFS);
39664+ if (!new_extents) {
39665+ ret = -ENOMEM;
39666+ goto out;
39667+ }
39668 nr_extents = 1;
39669 ret = get_new_locations(reloc_inode,
39670 extent_key,
39671diff -urNp linux-2.6.32.46/fs/btrfs/free-space-cache.c linux-2.6.32.46/fs/btrfs/free-space-cache.c
39672--- linux-2.6.32.46/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
39673+++ linux-2.6.32.46/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
39674@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
39675
39676 while(1) {
39677 if (entry->bytes < bytes || entry->offset < min_start) {
39678- struct rb_node *node;
39679-
39680 node = rb_next(&entry->offset_index);
39681 if (!node)
39682 break;
39683@@ -1226,7 +1224,7 @@ again:
39684 */
39685 while (entry->bitmap || found_bitmap ||
39686 (!entry->bitmap && entry->bytes < min_bytes)) {
39687- struct rb_node *node = rb_next(&entry->offset_index);
39688+ node = rb_next(&entry->offset_index);
39689
39690 if (entry->bitmap && entry->bytes > bytes + empty_size) {
39691 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
39692diff -urNp linux-2.6.32.46/fs/btrfs/inode.c linux-2.6.32.46/fs/btrfs/inode.c
39693--- linux-2.6.32.46/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
39694+++ linux-2.6.32.46/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
39695@@ -63,7 +63,7 @@ static const struct inode_operations btr
39696 static const struct address_space_operations btrfs_aops;
39697 static const struct address_space_operations btrfs_symlink_aops;
39698 static const struct file_operations btrfs_dir_file_operations;
39699-static struct extent_io_ops btrfs_extent_io_ops;
39700+static const struct extent_io_ops btrfs_extent_io_ops;
39701
39702 static struct kmem_cache *btrfs_inode_cachep;
39703 struct kmem_cache *btrfs_trans_handle_cachep;
39704@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
39705 1, 0, NULL, GFP_NOFS);
39706 while (start < end) {
39707 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
39708+ BUG_ON(!async_cow);
39709 async_cow->inode = inode;
39710 async_cow->root = root;
39711 async_cow->locked_page = locked_page;
39712@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
39713 inline_size = btrfs_file_extent_inline_item_len(leaf,
39714 btrfs_item_nr(leaf, path->slots[0]));
39715 tmp = kmalloc(inline_size, GFP_NOFS);
39716+ if (!tmp)
39717+ return -ENOMEM;
39718 ptr = btrfs_file_extent_inline_start(item);
39719
39720 read_extent_buffer(leaf, tmp, ptr, inline_size);
39721@@ -5410,7 +5413,7 @@ fail:
39722 return -ENOMEM;
39723 }
39724
39725-static int btrfs_getattr(struct vfsmount *mnt,
39726+int btrfs_getattr(struct vfsmount *mnt,
39727 struct dentry *dentry, struct kstat *stat)
39728 {
39729 struct inode *inode = dentry->d_inode;
39730@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
39731 return 0;
39732 }
39733
39734+EXPORT_SYMBOL(btrfs_getattr);
39735+
39736+dev_t get_btrfs_dev_from_inode(struct inode *inode)
39737+{
39738+ return BTRFS_I(inode)->root->anon_super.s_dev;
39739+}
39740+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
39741+
39742 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
39743 struct inode *new_dir, struct dentry *new_dentry)
39744 {
39745@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
39746 .fsync = btrfs_sync_file,
39747 };
39748
39749-static struct extent_io_ops btrfs_extent_io_ops = {
39750+static const struct extent_io_ops btrfs_extent_io_ops = {
39751 .fill_delalloc = run_delalloc_range,
39752 .submit_bio_hook = btrfs_submit_bio_hook,
39753 .merge_bio_hook = btrfs_merge_bio_hook,
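
The btrfs hunks above add NULL checks after kmalloc() in relocate_one_extent() and uncompress_inline() and propagate -ENOMEM instead of dereferencing a failed allocation. The same pattern in plain C, with the function name reused only for illustration:

/* sketch: an allocation mid-way through a call chain is checked and turned
 * into -ENOMEM rather than dereferenced. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int uncompress_inline(const char *src, size_t inline_size, char **out)
{
	char *tmp = malloc(inline_size);	/* kmalloc(inline_size, GFP_NOFS) */

	if (!tmp)
		return -ENOMEM;			/* the added check */
	memcpy(tmp, src, inline_size);
	*out = tmp;
	return 0;
}

int main(void)
{
	char *buf = NULL;
	int ret = uncompress_inline("data", 5, &buf);

	if (ret == 0)
		printf("copied: %s\n", buf);
	free(buf);
	return 0;
}
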
39754diff -urNp linux-2.6.32.46/fs/btrfs/relocation.c linux-2.6.32.46/fs/btrfs/relocation.c
39755--- linux-2.6.32.46/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
39756+++ linux-2.6.32.46/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
39757@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
39758 }
39759 spin_unlock(&rc->reloc_root_tree.lock);
39760
39761- BUG_ON((struct btrfs_root *)node->data != root);
39762+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
39763
39764 if (!del) {
39765 spin_lock(&rc->reloc_root_tree.lock);
39766diff -urNp linux-2.6.32.46/fs/btrfs/sysfs.c linux-2.6.32.46/fs/btrfs/sysfs.c
39767--- linux-2.6.32.46/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
39768+++ linux-2.6.32.46/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
39769@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
39770 complete(&root->kobj_unregister);
39771 }
39772
39773-static struct sysfs_ops btrfs_super_attr_ops = {
39774+static const struct sysfs_ops btrfs_super_attr_ops = {
39775 .show = btrfs_super_attr_show,
39776 .store = btrfs_super_attr_store,
39777 };
39778
39779-static struct sysfs_ops btrfs_root_attr_ops = {
39780+static const struct sysfs_ops btrfs_root_attr_ops = {
39781 .show = btrfs_root_attr_show,
39782 .store = btrfs_root_attr_store,
39783 };
39784diff -urNp linux-2.6.32.46/fs/buffer.c linux-2.6.32.46/fs/buffer.c
39785--- linux-2.6.32.46/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
39786+++ linux-2.6.32.46/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
39787@@ -25,6 +25,7 @@
39788 #include <linux/percpu.h>
39789 #include <linux/slab.h>
39790 #include <linux/capability.h>
39791+#include <linux/security.h>
39792 #include <linux/blkdev.h>
39793 #include <linux/file.h>
39794 #include <linux/quotaops.h>
39795diff -urNp linux-2.6.32.46/fs/cachefiles/bind.c linux-2.6.32.46/fs/cachefiles/bind.c
39796--- linux-2.6.32.46/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
39797+++ linux-2.6.32.46/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
39798@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
39799 args);
39800
39801 /* start by checking things over */
39802- ASSERT(cache->fstop_percent >= 0 &&
39803- cache->fstop_percent < cache->fcull_percent &&
39804+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
39805 cache->fcull_percent < cache->frun_percent &&
39806 cache->frun_percent < 100);
39807
39808- ASSERT(cache->bstop_percent >= 0 &&
39809- cache->bstop_percent < cache->bcull_percent &&
39810+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
39811 cache->bcull_percent < cache->brun_percent &&
39812 cache->brun_percent < 100);
39813
39814diff -urNp linux-2.6.32.46/fs/cachefiles/daemon.c linux-2.6.32.46/fs/cachefiles/daemon.c
39815--- linux-2.6.32.46/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
39816+++ linux-2.6.32.46/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
39817@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
39818 if (test_bit(CACHEFILES_DEAD, &cache->flags))
39819 return -EIO;
39820
39821- if (datalen < 0 || datalen > PAGE_SIZE - 1)
39822+ if (datalen > PAGE_SIZE - 1)
39823 return -EOPNOTSUPP;
39824
39825 /* drag the command string into the kernel so we can parse it */
39826@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
39827 if (args[0] != '%' || args[1] != '\0')
39828 return -EINVAL;
39829
39830- if (fstop < 0 || fstop >= cache->fcull_percent)
39831+ if (fstop >= cache->fcull_percent)
39832 return cachefiles_daemon_range_error(cache, args);
39833
39834 cache->fstop_percent = fstop;
39835@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
39836 if (args[0] != '%' || args[1] != '\0')
39837 return -EINVAL;
39838
39839- if (bstop < 0 || bstop >= cache->bcull_percent)
39840+ if (bstop >= cache->bcull_percent)
39841 return cachefiles_daemon_range_error(cache, args);
39842
39843 cache->bstop_percent = bstop;
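
The cachefiles hunks above drop tests such as "fstop < 0" and "datalen < 0" because those quantities are unsigned, so the tests can never be true; only the upper-bound comparison does any work. A short demonstration in plain C:

/* sketch: a "< 0" test on an unsigned value can never fire; negative input
 * wraps to a huge value and must be caught by the upper-bound check instead. */
#include <stdio.h>

int main(void)
{
	unsigned long datalen = (unsigned long)-5;	/* "negative" input */

	printf("datalen < 0    : %d\n", datalen < 0);	/* always 0 */
	printf("datalen > 4095 : %d\n", datalen > 4095);	/* 1: the real guard */
	return 0;
}
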
39844diff -urNp linux-2.6.32.46/fs/cachefiles/internal.h linux-2.6.32.46/fs/cachefiles/internal.h
39845--- linux-2.6.32.46/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
39846+++ linux-2.6.32.46/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
39847@@ -56,7 +56,7 @@ struct cachefiles_cache {
39848 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
39849 struct rb_root active_nodes; /* active nodes (can't be culled) */
39850 rwlock_t active_lock; /* lock for active_nodes */
39851- atomic_t gravecounter; /* graveyard uniquifier */
39852+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
39853 unsigned frun_percent; /* when to stop culling (% files) */
39854 unsigned fcull_percent; /* when to start culling (% files) */
39855 unsigned fstop_percent; /* when to stop allocating (% files) */
39856@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
39857 * proc.c
39858 */
39859 #ifdef CONFIG_CACHEFILES_HISTOGRAM
39860-extern atomic_t cachefiles_lookup_histogram[HZ];
39861-extern atomic_t cachefiles_mkdir_histogram[HZ];
39862-extern atomic_t cachefiles_create_histogram[HZ];
39863+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39864+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39865+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
39866
39867 extern int __init cachefiles_proc_init(void);
39868 extern void cachefiles_proc_cleanup(void);
39869 static inline
39870-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
39871+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
39872 {
39873 unsigned long jif = jiffies - start_jif;
39874 if (jif >= HZ)
39875 jif = HZ - 1;
39876- atomic_inc(&histogram[jif]);
39877+ atomic_inc_unchecked(&histogram[jif]);
39878 }
39879
39880 #else
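
The cachefiles counters above (and the cifs statistics further down) move from atomic_t to atomic_unchecked_t, a PaX-specific type used for statistics counters that are exempt from the patch's atomic overflow protection, since wraparound of a histogram counter is harmless. The kernel type is architecture-specific; what follows is only a loose userspace analogue built on C11 atomics, with invented names:

/* sketch: a thin wrapper type marks counters whose wraparound is harmless,
 * keeping them visibly distinct from reference counts that must not wrap. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_uint counter; } atomic_unchecked_demo_t;

static inline void inc_unchecked(atomic_unchecked_demo_t *v)
{
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline unsigned int read_unchecked(atomic_unchecked_demo_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

int main(void)
{
	atomic_unchecked_demo_t lookups = { 0 };

	for (int i = 0; i < 1000; i++)
		inc_unchecked(&lookups);
	printf("lookups: %u\n", read_unchecked(&lookups));
	return 0;
}

Keeping the two counter kinds as distinct types is what lets the rest of the patch instrument plain atomic_t operations without false positives on these statistics.
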
39881diff -urNp linux-2.6.32.46/fs/cachefiles/namei.c linux-2.6.32.46/fs/cachefiles/namei.c
39882--- linux-2.6.32.46/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
39883+++ linux-2.6.32.46/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
39884@@ -250,7 +250,7 @@ try_again:
39885 /* first step is to make up a grave dentry in the graveyard */
39886 sprintf(nbuffer, "%08x%08x",
39887 (uint32_t) get_seconds(),
39888- (uint32_t) atomic_inc_return(&cache->gravecounter));
39889+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
39890
39891 /* do the multiway lock magic */
39892 trap = lock_rename(cache->graveyard, dir);
39893diff -urNp linux-2.6.32.46/fs/cachefiles/proc.c linux-2.6.32.46/fs/cachefiles/proc.c
39894--- linux-2.6.32.46/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
39895+++ linux-2.6.32.46/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
39896@@ -14,9 +14,9 @@
39897 #include <linux/seq_file.h>
39898 #include "internal.h"
39899
39900-atomic_t cachefiles_lookup_histogram[HZ];
39901-atomic_t cachefiles_mkdir_histogram[HZ];
39902-atomic_t cachefiles_create_histogram[HZ];
39903+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
39904+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
39905+atomic_unchecked_t cachefiles_create_histogram[HZ];
39906
39907 /*
39908 * display the latency histogram
39909@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
39910 return 0;
39911 default:
39912 index = (unsigned long) v - 3;
39913- x = atomic_read(&cachefiles_lookup_histogram[index]);
39914- y = atomic_read(&cachefiles_mkdir_histogram[index]);
39915- z = atomic_read(&cachefiles_create_histogram[index]);
39916+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
39917+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
39918+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
39919 if (x == 0 && y == 0 && z == 0)
39920 return 0;
39921
39922diff -urNp linux-2.6.32.46/fs/cachefiles/rdwr.c linux-2.6.32.46/fs/cachefiles/rdwr.c
39923--- linux-2.6.32.46/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
39924+++ linux-2.6.32.46/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
39925@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
39926 old_fs = get_fs();
39927 set_fs(KERNEL_DS);
39928 ret = file->f_op->write(
39929- file, (const void __user *) data, len, &pos);
39930+ file, (__force const void __user *) data, len, &pos);
39931 set_fs(old_fs);
39932 kunmap(page);
39933 if (ret != len)
39934diff -urNp linux-2.6.32.46/fs/cifs/cifs_debug.c linux-2.6.32.46/fs/cifs/cifs_debug.c
39935--- linux-2.6.32.46/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
39936+++ linux-2.6.32.46/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
39937@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
39938 tcon = list_entry(tmp3,
39939 struct cifsTconInfo,
39940 tcon_list);
39941- atomic_set(&tcon->num_smbs_sent, 0);
39942- atomic_set(&tcon->num_writes, 0);
39943- atomic_set(&tcon->num_reads, 0);
39944- atomic_set(&tcon->num_oplock_brks, 0);
39945- atomic_set(&tcon->num_opens, 0);
39946- atomic_set(&tcon->num_posixopens, 0);
39947- atomic_set(&tcon->num_posixmkdirs, 0);
39948- atomic_set(&tcon->num_closes, 0);
39949- atomic_set(&tcon->num_deletes, 0);
39950- atomic_set(&tcon->num_mkdirs, 0);
39951- atomic_set(&tcon->num_rmdirs, 0);
39952- atomic_set(&tcon->num_renames, 0);
39953- atomic_set(&tcon->num_t2renames, 0);
39954- atomic_set(&tcon->num_ffirst, 0);
39955- atomic_set(&tcon->num_fnext, 0);
39956- atomic_set(&tcon->num_fclose, 0);
39957- atomic_set(&tcon->num_hardlinks, 0);
39958- atomic_set(&tcon->num_symlinks, 0);
39959- atomic_set(&tcon->num_locks, 0);
39960+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
39961+ atomic_set_unchecked(&tcon->num_writes, 0);
39962+ atomic_set_unchecked(&tcon->num_reads, 0);
39963+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
39964+ atomic_set_unchecked(&tcon->num_opens, 0);
39965+ atomic_set_unchecked(&tcon->num_posixopens, 0);
39966+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
39967+ atomic_set_unchecked(&tcon->num_closes, 0);
39968+ atomic_set_unchecked(&tcon->num_deletes, 0);
39969+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
39970+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
39971+ atomic_set_unchecked(&tcon->num_renames, 0);
39972+ atomic_set_unchecked(&tcon->num_t2renames, 0);
39973+ atomic_set_unchecked(&tcon->num_ffirst, 0);
39974+ atomic_set_unchecked(&tcon->num_fnext, 0);
39975+ atomic_set_unchecked(&tcon->num_fclose, 0);
39976+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
39977+ atomic_set_unchecked(&tcon->num_symlinks, 0);
39978+ atomic_set_unchecked(&tcon->num_locks, 0);
39979 }
39980 }
39981 }
39982@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
39983 if (tcon->need_reconnect)
39984 seq_puts(m, "\tDISCONNECTED ");
39985 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
39986- atomic_read(&tcon->num_smbs_sent),
39987- atomic_read(&tcon->num_oplock_brks));
39988+ atomic_read_unchecked(&tcon->num_smbs_sent),
39989+ atomic_read_unchecked(&tcon->num_oplock_brks));
39990 seq_printf(m, "\nReads: %d Bytes: %lld",
39991- atomic_read(&tcon->num_reads),
39992+ atomic_read_unchecked(&tcon->num_reads),
39993 (long long)(tcon->bytes_read));
39994 seq_printf(m, "\nWrites: %d Bytes: %lld",
39995- atomic_read(&tcon->num_writes),
39996+ atomic_read_unchecked(&tcon->num_writes),
39997 (long long)(tcon->bytes_written));
39998 seq_printf(m, "\nFlushes: %d",
39999- atomic_read(&tcon->num_flushes));
40000+ atomic_read_unchecked(&tcon->num_flushes));
40001 seq_printf(m, "\nLocks: %d HardLinks: %d "
40002 "Symlinks: %d",
40003- atomic_read(&tcon->num_locks),
40004- atomic_read(&tcon->num_hardlinks),
40005- atomic_read(&tcon->num_symlinks));
40006+ atomic_read_unchecked(&tcon->num_locks),
40007+ atomic_read_unchecked(&tcon->num_hardlinks),
40008+ atomic_read_unchecked(&tcon->num_symlinks));
40009 seq_printf(m, "\nOpens: %d Closes: %d "
40010 "Deletes: %d",
40011- atomic_read(&tcon->num_opens),
40012- atomic_read(&tcon->num_closes),
40013- atomic_read(&tcon->num_deletes));
40014+ atomic_read_unchecked(&tcon->num_opens),
40015+ atomic_read_unchecked(&tcon->num_closes),
40016+ atomic_read_unchecked(&tcon->num_deletes));
40017 seq_printf(m, "\nPosix Opens: %d "
40018 "Posix Mkdirs: %d",
40019- atomic_read(&tcon->num_posixopens),
40020- atomic_read(&tcon->num_posixmkdirs));
40021+ atomic_read_unchecked(&tcon->num_posixopens),
40022+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40023 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40024- atomic_read(&tcon->num_mkdirs),
40025- atomic_read(&tcon->num_rmdirs));
40026+ atomic_read_unchecked(&tcon->num_mkdirs),
40027+ atomic_read_unchecked(&tcon->num_rmdirs));
40028 seq_printf(m, "\nRenames: %d T2 Renames %d",
40029- atomic_read(&tcon->num_renames),
40030- atomic_read(&tcon->num_t2renames));
40031+ atomic_read_unchecked(&tcon->num_renames),
40032+ atomic_read_unchecked(&tcon->num_t2renames));
40033 seq_printf(m, "\nFindFirst: %d FNext %d "
40034 "FClose %d",
40035- atomic_read(&tcon->num_ffirst),
40036- atomic_read(&tcon->num_fnext),
40037- atomic_read(&tcon->num_fclose));
40038+ atomic_read_unchecked(&tcon->num_ffirst),
40039+ atomic_read_unchecked(&tcon->num_fnext),
40040+ atomic_read_unchecked(&tcon->num_fclose));
40041 }
40042 }
40043 }
40044diff -urNp linux-2.6.32.46/fs/cifs/cifsfs.c linux-2.6.32.46/fs/cifs/cifsfs.c
40045--- linux-2.6.32.46/fs/cifs/cifsfs.c 2011-03-27 14:31:47.000000000 -0400
40046+++ linux-2.6.32.46/fs/cifs/cifsfs.c 2011-08-25 17:17:57.000000000 -0400
40047@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
40048 cifs_req_cachep = kmem_cache_create("cifs_request",
40049 CIFSMaxBufSize +
40050 MAX_CIFS_HDR_SIZE, 0,
40051- SLAB_HWCACHE_ALIGN, NULL);
40052+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40053 if (cifs_req_cachep == NULL)
40054 return -ENOMEM;
40055
40056@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
40057 efficient to alloc 1 per page off the slab compared to 17K (5page)
40058 alloc of large cifs buffers even when page debugging is on */
40059 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40060- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40061+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40062 NULL);
40063 if (cifs_sm_req_cachep == NULL) {
40064 mempool_destroy(cifs_req_poolp);
40065@@ -991,8 +991,8 @@ init_cifs(void)
40066 atomic_set(&bufAllocCount, 0);
40067 atomic_set(&smBufAllocCount, 0);
40068 #ifdef CONFIG_CIFS_STATS2
40069- atomic_set(&totBufAllocCount, 0);
40070- atomic_set(&totSmBufAllocCount, 0);
40071+ atomic_set_unchecked(&totBufAllocCount, 0);
40072+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40073 #endif /* CONFIG_CIFS_STATS2 */
40074
40075 atomic_set(&midCount, 0);
40076diff -urNp linux-2.6.32.46/fs/cifs/cifsglob.h linux-2.6.32.46/fs/cifs/cifsglob.h
40077--- linux-2.6.32.46/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40078+++ linux-2.6.32.46/fs/cifs/cifsglob.h 2011-08-25 17:17:57.000000000 -0400
40079@@ -252,28 +252,28 @@ struct cifsTconInfo {
40080 __u16 Flags; /* optional support bits */
40081 enum statusEnum tidStatus;
40082 #ifdef CONFIG_CIFS_STATS
40083- atomic_t num_smbs_sent;
40084- atomic_t num_writes;
40085- atomic_t num_reads;
40086- atomic_t num_flushes;
40087- atomic_t num_oplock_brks;
40088- atomic_t num_opens;
40089- atomic_t num_closes;
40090- atomic_t num_deletes;
40091- atomic_t num_mkdirs;
40092- atomic_t num_posixopens;
40093- atomic_t num_posixmkdirs;
40094- atomic_t num_rmdirs;
40095- atomic_t num_renames;
40096- atomic_t num_t2renames;
40097- atomic_t num_ffirst;
40098- atomic_t num_fnext;
40099- atomic_t num_fclose;
40100- atomic_t num_hardlinks;
40101- atomic_t num_symlinks;
40102- atomic_t num_locks;
40103- atomic_t num_acl_get;
40104- atomic_t num_acl_set;
40105+ atomic_unchecked_t num_smbs_sent;
40106+ atomic_unchecked_t num_writes;
40107+ atomic_unchecked_t num_reads;
40108+ atomic_unchecked_t num_flushes;
40109+ atomic_unchecked_t num_oplock_brks;
40110+ atomic_unchecked_t num_opens;
40111+ atomic_unchecked_t num_closes;
40112+ atomic_unchecked_t num_deletes;
40113+ atomic_unchecked_t num_mkdirs;
40114+ atomic_unchecked_t num_posixopens;
40115+ atomic_unchecked_t num_posixmkdirs;
40116+ atomic_unchecked_t num_rmdirs;
40117+ atomic_unchecked_t num_renames;
40118+ atomic_unchecked_t num_t2renames;
40119+ atomic_unchecked_t num_ffirst;
40120+ atomic_unchecked_t num_fnext;
40121+ atomic_unchecked_t num_fclose;
40122+ atomic_unchecked_t num_hardlinks;
40123+ atomic_unchecked_t num_symlinks;
40124+ atomic_unchecked_t num_locks;
40125+ atomic_unchecked_t num_acl_get;
40126+ atomic_unchecked_t num_acl_set;
40127 #ifdef CONFIG_CIFS_STATS2
40128 unsigned long long time_writes;
40129 unsigned long long time_reads;
40130@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40131 }
40132
40133 #ifdef CONFIG_CIFS_STATS
40134-#define cifs_stats_inc atomic_inc
40135+#define cifs_stats_inc atomic_inc_unchecked
40136
40137 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40138 unsigned int bytes)
40139@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
40140 /* Various Debug counters */
40141 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
40142 #ifdef CONFIG_CIFS_STATS2
40143-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
40144-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
40145+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
40146+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
40147 #endif
40148 GLOBAL_EXTERN atomic_t smBufAllocCount;
40149 GLOBAL_EXTERN atomic_t midCount;
40150diff -urNp linux-2.6.32.46/fs/cifs/link.c linux-2.6.32.46/fs/cifs/link.c
40151--- linux-2.6.32.46/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40152+++ linux-2.6.32.46/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40153@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40154
40155 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40156 {
40157- char *p = nd_get_link(nd);
40158+ const char *p = nd_get_link(nd);
40159 if (!IS_ERR(p))
40160 kfree(p);
40161 }
40162diff -urNp linux-2.6.32.46/fs/cifs/misc.c linux-2.6.32.46/fs/cifs/misc.c
40163--- linux-2.6.32.46/fs/cifs/misc.c 2011-03-27 14:31:47.000000000 -0400
40164+++ linux-2.6.32.46/fs/cifs/misc.c 2011-08-25 17:17:57.000000000 -0400
40165@@ -155,7 +155,7 @@ cifs_buf_get(void)
40166 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
40167 atomic_inc(&bufAllocCount);
40168 #ifdef CONFIG_CIFS_STATS2
40169- atomic_inc(&totBufAllocCount);
40170+ atomic_inc_unchecked(&totBufAllocCount);
40171 #endif /* CONFIG_CIFS_STATS2 */
40172 }
40173
40174@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
40175 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
40176 atomic_inc(&smBufAllocCount);
40177 #ifdef CONFIG_CIFS_STATS2
40178- atomic_inc(&totSmBufAllocCount);
40179+ atomic_inc_unchecked(&totSmBufAllocCount);
40180 #endif /* CONFIG_CIFS_STATS2 */
40181
40182 }
40183diff -urNp linux-2.6.32.46/fs/coda/cache.c linux-2.6.32.46/fs/coda/cache.c
40184--- linux-2.6.32.46/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40185+++ linux-2.6.32.46/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40186@@ -24,14 +24,14 @@
40187 #include <linux/coda_fs_i.h>
40188 #include <linux/coda_cache.h>
40189
40190-static atomic_t permission_epoch = ATOMIC_INIT(0);
40191+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40192
40193 /* replace or extend an acl cache hit */
40194 void coda_cache_enter(struct inode *inode, int mask)
40195 {
40196 struct coda_inode_info *cii = ITOC(inode);
40197
40198- cii->c_cached_epoch = atomic_read(&permission_epoch);
40199+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40200 if (cii->c_uid != current_fsuid()) {
40201 cii->c_uid = current_fsuid();
40202 cii->c_cached_perm = mask;
40203@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40204 void coda_cache_clear_inode(struct inode *inode)
40205 {
40206 struct coda_inode_info *cii = ITOC(inode);
40207- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40208+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40209 }
40210
40211 /* remove all acl caches */
40212 void coda_cache_clear_all(struct super_block *sb)
40213 {
40214- atomic_inc(&permission_epoch);
40215+ atomic_inc_unchecked(&permission_epoch);
40216 }
40217
40218
40219@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40220
40221 hit = (mask & cii->c_cached_perm) == mask &&
40222 cii->c_uid == current_fsuid() &&
40223- cii->c_cached_epoch == atomic_read(&permission_epoch);
40224+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40225
40226 return hit;
40227 }
40228diff -urNp linux-2.6.32.46/fs/compat_binfmt_elf.c linux-2.6.32.46/fs/compat_binfmt_elf.c
40229--- linux-2.6.32.46/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40230+++ linux-2.6.32.46/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40231@@ -29,10 +29,12 @@
40232 #undef elfhdr
40233 #undef elf_phdr
40234 #undef elf_note
40235+#undef elf_dyn
40236 #undef elf_addr_t
40237 #define elfhdr elf32_hdr
40238 #define elf_phdr elf32_phdr
40239 #define elf_note elf32_note
40240+#define elf_dyn Elf32_Dyn
40241 #define elf_addr_t Elf32_Addr
40242
40243 /*
40244diff -urNp linux-2.6.32.46/fs/compat.c linux-2.6.32.46/fs/compat.c
40245--- linux-2.6.32.46/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40246+++ linux-2.6.32.46/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40247@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40248
40249 struct compat_readdir_callback {
40250 struct compat_old_linux_dirent __user *dirent;
40251+ struct file * file;
40252 int result;
40253 };
40254
40255@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40256 buf->result = -EOVERFLOW;
40257 return -EOVERFLOW;
40258 }
40259+
40260+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40261+ return 0;
40262+
40263 buf->result++;
40264 dirent = buf->dirent;
40265 if (!access_ok(VERIFY_WRITE, dirent,
40266@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40267
40268 buf.result = 0;
40269 buf.dirent = dirent;
40270+ buf.file = file;
40271
40272 error = vfs_readdir(file, compat_fillonedir, &buf);
40273 if (buf.result)
40274@@ -899,6 +905,7 @@ struct compat_linux_dirent {
40275 struct compat_getdents_callback {
40276 struct compat_linux_dirent __user *current_dir;
40277 struct compat_linux_dirent __user *previous;
40278+ struct file * file;
40279 int count;
40280 int error;
40281 };
40282@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40283 buf->error = -EOVERFLOW;
40284 return -EOVERFLOW;
40285 }
40286+
40287+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40288+ return 0;
40289+
40290 dirent = buf->previous;
40291 if (dirent) {
40292 if (__put_user(offset, &dirent->d_off))
40293@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40294 buf.previous = NULL;
40295 buf.count = count;
40296 buf.error = 0;
40297+ buf.file = file;
40298
40299 error = vfs_readdir(file, compat_filldir, &buf);
40300 if (error >= 0)
40301@@ -987,6 +999,7 @@ out:
40302 struct compat_getdents_callback64 {
40303 struct linux_dirent64 __user *current_dir;
40304 struct linux_dirent64 __user *previous;
40305+ struct file * file;
40306 int count;
40307 int error;
40308 };
40309@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40310 buf->error = -EINVAL; /* only used if we fail.. */
40311 if (reclen > buf->count)
40312 return -EINVAL;
40313+
40314+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40315+ return 0;
40316+
40317 dirent = buf->previous;
40318
40319 if (dirent) {
40320@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40321 buf.previous = NULL;
40322 buf.count = count;
40323 buf.error = 0;
40324+ buf.file = file;
40325
40326 error = vfs_readdir(file, compat_filldir64, &buf);
40327 if (error >= 0)
40328@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40329 * verify all the pointers
40330 */
40331 ret = -EINVAL;
40332- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40333+ if (nr_segs > UIO_MAXIOV)
40334 goto out;
40335 if (!file->f_op)
40336 goto out;
40337@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40338 compat_uptr_t __user *envp,
40339 struct pt_regs * regs)
40340 {
40341+#ifdef CONFIG_GRKERNSEC
40342+ struct file *old_exec_file;
40343+ struct acl_subject_label *old_acl;
40344+ struct rlimit old_rlim[RLIM_NLIMITS];
40345+#endif
40346 struct linux_binprm *bprm;
40347 struct file *file;
40348 struct files_struct *displaced;
40349 bool clear_in_exec;
40350 int retval;
40351+ const struct cred *cred = current_cred();
40352+
40353+ /*
40354+ * We move the actual failure in case of RLIMIT_NPROC excess from
40355+ * set*uid() to execve() because too many poorly written programs
40356+ * don't check setuid() return code. Here we additionally recheck
40357+ * whether NPROC limit is still exceeded.
40358+ */
40359+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40360+
40361+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40362+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40363+ retval = -EAGAIN;
40364+ goto out_ret;
40365+ }
40366+
40367+ /* We're below the limit (still or again), so we don't want to make
40368+ * further execve() calls fail. */
40369+ current->flags &= ~PF_NPROC_EXCEEDED;
40370
40371 retval = unshare_files(&displaced);
40372 if (retval)
40373@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40374 bprm->filename = filename;
40375 bprm->interp = filename;
40376
40377+ if (gr_process_user_ban()) {
40378+ retval = -EPERM;
40379+ goto out_file;
40380+ }
40381+
40382+ retval = -EACCES;
40383+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40384+ goto out_file;
40385+
40386 retval = bprm_mm_init(bprm);
40387 if (retval)
40388 goto out_file;
40389@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40390 if (retval < 0)
40391 goto out;
40392
40393+ if (!gr_tpe_allow(file)) {
40394+ retval = -EACCES;
40395+ goto out;
40396+ }
40397+
40398+ if (gr_check_crash_exec(file)) {
40399+ retval = -EACCES;
40400+ goto out;
40401+ }
40402+
40403+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40404+
40405+ gr_handle_exec_args_compat(bprm, argv);
40406+
40407+#ifdef CONFIG_GRKERNSEC
40408+ old_acl = current->acl;
40409+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40410+ old_exec_file = current->exec_file;
40411+ get_file(file);
40412+ current->exec_file = file;
40413+#endif
40414+
40415+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40416+ bprm->unsafe & LSM_UNSAFE_SHARE);
40417+ if (retval < 0)
40418+ goto out_fail;
40419+
40420 retval = search_binary_handler(bprm, regs);
40421 if (retval < 0)
40422- goto out;
40423+ goto out_fail;
40424+#ifdef CONFIG_GRKERNSEC
40425+ if (old_exec_file)
40426+ fput(old_exec_file);
40427+#endif
40428
40429 /* execve succeeded */
40430 current->fs->in_exec = 0;
40431@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40432 put_files_struct(displaced);
40433 return retval;
40434
40435+out_fail:
40436+#ifdef CONFIG_GRKERNSEC
40437+ current->acl = old_acl;
40438+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40439+ fput(current->exec_file);
40440+ current->exec_file = old_exec_file;
40441+#endif
40442+
40443 out:
40444 if (bprm->mm) {
40445 acct_arg_size(bprm, 0);
40446@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40447 struct fdtable *fdt;
40448 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40449
40450+ pax_track_stack();
40451+
40452 if (n < 0)
40453 goto out_nofds;
40454
40455diff -urNp linux-2.6.32.46/fs/compat_ioctl.c linux-2.6.32.46/fs/compat_ioctl.c
40456--- linux-2.6.32.46/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40457+++ linux-2.6.32.46/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40458@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40459 up = (struct compat_video_spu_palette __user *) arg;
40460 err = get_user(palp, &up->palette);
40461 err |= get_user(length, &up->length);
40462+ if (err)
40463+ return -EFAULT;
40464
40465 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40466 err = put_user(compat_ptr(palp), &up_native->palette);
40467diff -urNp linux-2.6.32.46/fs/configfs/dir.c linux-2.6.32.46/fs/configfs/dir.c
40468--- linux-2.6.32.46/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40469+++ linux-2.6.32.46/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40470@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40471 }
40472 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40473 struct configfs_dirent *next;
40474- const char * name;
40475+ const unsigned char * name;
40476+ char d_name[sizeof(next->s_dentry->d_iname)];
40477 int len;
40478
40479 next = list_entry(p, struct configfs_dirent,
40480@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40481 continue;
40482
40483 name = configfs_get_name(next);
40484- len = strlen(name);
40485+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40486+ len = next->s_dentry->d_name.len;
40487+ memcpy(d_name, name, len);
40488+ name = d_name;
40489+ } else
40490+ len = strlen(name);
40491 if (next->s_dentry)
40492 ino = next->s_dentry->d_inode->i_ino;
40493 else
40494diff -urNp linux-2.6.32.46/fs/dcache.c linux-2.6.32.46/fs/dcache.c
40495--- linux-2.6.32.46/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40496+++ linux-2.6.32.46/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40497@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40498
40499 static struct kmem_cache *dentry_cache __read_mostly;
40500
40501-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40502-
40503 /*
40504 * This is the single most critical data structure when it comes
40505 * to the dcache: the hashtable for lookups. Somebody should try
40506@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40507 mempages -= reserve;
40508
40509 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40510- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40511+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40512
40513 dcache_init();
40514 inode_init();
40515diff -urNp linux-2.6.32.46/fs/dlm/lockspace.c linux-2.6.32.46/fs/dlm/lockspace.c
40516--- linux-2.6.32.46/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40517+++ linux-2.6.32.46/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40518@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40519 kfree(ls);
40520 }
40521
40522-static struct sysfs_ops dlm_attr_ops = {
40523+static const struct sysfs_ops dlm_attr_ops = {
40524 .show = dlm_attr_show,
40525 .store = dlm_attr_store,
40526 };
40527diff -urNp linux-2.6.32.46/fs/ecryptfs/inode.c linux-2.6.32.46/fs/ecryptfs/inode.c
40528--- linux-2.6.32.46/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40529+++ linux-2.6.32.46/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40530@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40531 old_fs = get_fs();
40532 set_fs(get_ds());
40533 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40534- (char __user *)lower_buf,
40535+ (__force char __user *)lower_buf,
40536 lower_bufsiz);
40537 set_fs(old_fs);
40538 if (rc < 0)
40539@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40540 }
40541 old_fs = get_fs();
40542 set_fs(get_ds());
40543- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40544+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40545 set_fs(old_fs);
40546 if (rc < 0)
40547 goto out_free;
40548diff -urNp linux-2.6.32.46/fs/exec.c linux-2.6.32.46/fs/exec.c
40549--- linux-2.6.32.46/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40550+++ linux-2.6.32.46/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40551@@ -56,12 +56,24 @@
40552 #include <linux/fsnotify.h>
40553 #include <linux/fs_struct.h>
40554 #include <linux/pipe_fs_i.h>
40555+#include <linux/random.h>
40556+#include <linux/seq_file.h>
40557+
40558+#ifdef CONFIG_PAX_REFCOUNT
40559+#include <linux/kallsyms.h>
40560+#include <linux/kdebug.h>
40561+#endif
40562
40563 #include <asm/uaccess.h>
40564 #include <asm/mmu_context.h>
40565 #include <asm/tlb.h>
40566 #include "internal.h"
40567
40568+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
40569+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
40570+EXPORT_SYMBOL(pax_set_initial_flags_func);
40571+#endif
40572+
40573 int core_uses_pid;
40574 char core_pattern[CORENAME_MAX_SIZE] = "core";
40575 unsigned int core_pipe_limit;
40576@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
40577 goto out;
40578
40579 file = do_filp_open(AT_FDCWD, tmp,
40580- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40581+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40582 MAY_READ | MAY_EXEC | MAY_OPEN);
40583 putname(tmp);
40584 error = PTR_ERR(file);
40585@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
40586 int write)
40587 {
40588 struct page *page;
40589- int ret;
40590
40591-#ifdef CONFIG_STACK_GROWSUP
40592- if (write) {
40593- ret = expand_stack_downwards(bprm->vma, pos);
40594- if (ret < 0)
40595- return NULL;
40596- }
40597-#endif
40598- ret = get_user_pages(current, bprm->mm, pos,
40599- 1, write, 1, &page, NULL);
40600- if (ret <= 0)
40601+ if (0 > expand_stack_downwards(bprm->vma, pos))
40602+ return NULL;
40603+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
40604 return NULL;
40605
40606 if (write) {
40607@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
40608 vma->vm_end = STACK_TOP_MAX;
40609 vma->vm_start = vma->vm_end - PAGE_SIZE;
40610 vma->vm_flags = VM_STACK_FLAGS;
40611+
40612+#ifdef CONFIG_PAX_SEGMEXEC
40613+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
40614+#endif
40615+
40616 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
40617
40618 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
40619@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
40620 mm->stack_vm = mm->total_vm = 1;
40621 up_write(&mm->mmap_sem);
40622 bprm->p = vma->vm_end - sizeof(void *);
40623+
40624+#ifdef CONFIG_PAX_RANDUSTACK
40625+ if (randomize_va_space)
40626+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
40627+#endif
40628+
40629 return 0;
40630 err:
40631 up_write(&mm->mmap_sem);
40632@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
40633 int r;
40634 mm_segment_t oldfs = get_fs();
40635 set_fs(KERNEL_DS);
40636- r = copy_strings(argc, (char __user * __user *)argv, bprm);
40637+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
40638 set_fs(oldfs);
40639 return r;
40640 }
40641@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
40642 unsigned long new_end = old_end - shift;
40643 struct mmu_gather *tlb;
40644
40645- BUG_ON(new_start > new_end);
40646+ if (new_start >= new_end || new_start < mmap_min_addr)
40647+ return -ENOMEM;
40648
40649 /*
40650 * ensure there are no vmas between where we want to go
40651@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
40652 if (vma != find_vma(mm, new_start))
40653 return -EFAULT;
40654
40655+#ifdef CONFIG_PAX_SEGMEXEC
40656+ BUG_ON(pax_find_mirror_vma(vma));
40657+#endif
40658+
40659 /*
40660 * cover the whole range: [new_start, old_end)
40661 */
40662@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
40663 stack_top = arch_align_stack(stack_top);
40664 stack_top = PAGE_ALIGN(stack_top);
40665
40666- if (unlikely(stack_top < mmap_min_addr) ||
40667- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
40668- return -ENOMEM;
40669-
40670 stack_shift = vma->vm_end - stack_top;
40671
40672 bprm->p -= stack_shift;
40673@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
40674 bprm->exec -= stack_shift;
40675
40676 down_write(&mm->mmap_sem);
40677+
40678+ /* Move stack pages down in memory. */
40679+ if (stack_shift) {
40680+ ret = shift_arg_pages(vma, stack_shift);
40681+ if (ret)
40682+ goto out_unlock;
40683+ }
40684+
40685 vm_flags = VM_STACK_FLAGS;
40686
40687 /*
40688@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
40689 vm_flags &= ~VM_EXEC;
40690 vm_flags |= mm->def_flags;
40691
40692+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40693+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40694+ vm_flags &= ~VM_EXEC;
40695+
40696+#ifdef CONFIG_PAX_MPROTECT
40697+ if (mm->pax_flags & MF_PAX_MPROTECT)
40698+ vm_flags &= ~VM_MAYEXEC;
40699+#endif
40700+
40701+ }
40702+#endif
40703+
40704 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
40705 vm_flags);
40706 if (ret)
40707 goto out_unlock;
40708 BUG_ON(prev != vma);
40709
40710- /* Move stack pages down in memory. */
40711- if (stack_shift) {
40712- ret = shift_arg_pages(vma, stack_shift);
40713- if (ret)
40714- goto out_unlock;
40715- }
40716-
40717 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
40718 stack_size = vma->vm_end - vma->vm_start;
40719 /*
40720@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
40721 int err;
40722
40723 file = do_filp_open(AT_FDCWD, name,
40724- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
40725+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
40726 MAY_EXEC | MAY_OPEN);
40727 if (IS_ERR(file))
40728 goto out;
40729@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
40730 old_fs = get_fs();
40731 set_fs(get_ds());
40732 /* The cast to a user pointer is valid due to the set_fs() */
40733- result = vfs_read(file, (void __user *)addr, count, &pos);
40734+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
40735 set_fs(old_fs);
40736 return result;
40737 }
40738@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
40739 }
40740 rcu_read_unlock();
40741
40742- if (p->fs->users > n_fs) {
40743+ if (atomic_read(&p->fs->users) > n_fs) {
40744 bprm->unsafe |= LSM_UNSAFE_SHARE;
40745 } else {
40746 res = -EAGAIN;
40747@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
40748 char __user *__user *envp,
40749 struct pt_regs * regs)
40750 {
40751+#ifdef CONFIG_GRKERNSEC
40752+ struct file *old_exec_file;
40753+ struct acl_subject_label *old_acl;
40754+ struct rlimit old_rlim[RLIM_NLIMITS];
40755+#endif
40756 struct linux_binprm *bprm;
40757 struct file *file;
40758 struct files_struct *displaced;
40759 bool clear_in_exec;
40760 int retval;
40761+ const struct cred *cred = current_cred();
40762+
40763+ /*
40764+ * We move the actual failure in case of RLIMIT_NPROC excess from
40765+ * set*uid() to execve() because too many poorly written programs
40766+ * don't check setuid() return code. Here we additionally recheck
40767+ * whether NPROC limit is still exceeded.
40768+ */
40769+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40770+
40771+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40772+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40773+ retval = -EAGAIN;
40774+ goto out_ret;
40775+ }
40776+
40777+ /* We're below the limit (still or again), so we don't want to make
40778+ * further execve() calls fail. */
40779+ current->flags &= ~PF_NPROC_EXCEEDED;
40780
40781 retval = unshare_files(&displaced);
40782 if (retval)
40783@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
40784 bprm->filename = filename;
40785 bprm->interp = filename;
40786
40787+ if (gr_process_user_ban()) {
40788+ retval = -EPERM;
40789+ goto out_file;
40790+ }
40791+
40792+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
40793+ retval = -EACCES;
40794+ goto out_file;
40795+ }
40796+
40797 retval = bprm_mm_init(bprm);
40798 if (retval)
40799 goto out_file;
40800@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
40801 if (retval < 0)
40802 goto out;
40803
40804+ if (!gr_tpe_allow(file)) {
40805+ retval = -EACCES;
40806+ goto out;
40807+ }
40808+
40809+ if (gr_check_crash_exec(file)) {
40810+ retval = -EACCES;
40811+ goto out;
40812+ }
40813+
40814+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40815+
40816+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
40817+
40818+#ifdef CONFIG_GRKERNSEC
40819+ old_acl = current->acl;
40820+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40821+ old_exec_file = current->exec_file;
40822+ get_file(file);
40823+ current->exec_file = file;
40824+#endif
40825+
40826+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40827+ bprm->unsafe & LSM_UNSAFE_SHARE);
40828+ if (retval < 0)
40829+ goto out_fail;
40830+
40831 current->flags &= ~PF_KTHREAD;
40832 retval = search_binary_handler(bprm,regs);
40833 if (retval < 0)
40834- goto out;
40835+ goto out_fail;
40836+#ifdef CONFIG_GRKERNSEC
40837+ if (old_exec_file)
40838+ fput(old_exec_file);
40839+#endif
40840
40841 /* execve succeeded */
40842 current->fs->in_exec = 0;
40843@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
40844 put_files_struct(displaced);
40845 return retval;
40846
40847+out_fail:
40848+#ifdef CONFIG_GRKERNSEC
40849+ current->acl = old_acl;
40850+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40851+ fput(current->exec_file);
40852+ current->exec_file = old_exec_file;
40853+#endif
40854+
40855 out:
40856 if (bprm->mm) {
40857 acct_arg_size(bprm, 0);
40858@@ -1591,6 +1693,220 @@ out:
40859 return ispipe;
40860 }
40861
40862+int pax_check_flags(unsigned long *flags)
40863+{
40864+ int retval = 0;
40865+
40866+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
40867+ if (*flags & MF_PAX_SEGMEXEC)
40868+ {
40869+ *flags &= ~MF_PAX_SEGMEXEC;
40870+ retval = -EINVAL;
40871+ }
40872+#endif
40873+
40874+ if ((*flags & MF_PAX_PAGEEXEC)
40875+
40876+#ifdef CONFIG_PAX_PAGEEXEC
40877+ && (*flags & MF_PAX_SEGMEXEC)
40878+#endif
40879+
40880+ )
40881+ {
40882+ *flags &= ~MF_PAX_PAGEEXEC;
40883+ retval = -EINVAL;
40884+ }
40885+
40886+ if ((*flags & MF_PAX_MPROTECT)
40887+
40888+#ifdef CONFIG_PAX_MPROTECT
40889+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40890+#endif
40891+
40892+ )
40893+ {
40894+ *flags &= ~MF_PAX_MPROTECT;
40895+ retval = -EINVAL;
40896+ }
40897+
40898+ if ((*flags & MF_PAX_EMUTRAMP)
40899+
40900+#ifdef CONFIG_PAX_EMUTRAMP
40901+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
40902+#endif
40903+
40904+ )
40905+ {
40906+ *flags &= ~MF_PAX_EMUTRAMP;
40907+ retval = -EINVAL;
40908+ }
40909+
40910+ return retval;
40911+}
40912+
40913+EXPORT_SYMBOL(pax_check_flags);
40914+
40915+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40916+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
40917+{
40918+ struct task_struct *tsk = current;
40919+ struct mm_struct *mm = current->mm;
40920+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
40921+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
40922+ char *path_exec = NULL;
40923+ char *path_fault = NULL;
40924+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
40925+
40926+ if (buffer_exec && buffer_fault) {
40927+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
40928+
40929+ down_read(&mm->mmap_sem);
40930+ vma = mm->mmap;
40931+ while (vma && (!vma_exec || !vma_fault)) {
40932+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
40933+ vma_exec = vma;
40934+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
40935+ vma_fault = vma;
40936+ vma = vma->vm_next;
40937+ }
40938+ if (vma_exec) {
40939+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
40940+ if (IS_ERR(path_exec))
40941+ path_exec = "<path too long>";
40942+ else {
40943+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
40944+ if (path_exec) {
40945+ *path_exec = 0;
40946+ path_exec = buffer_exec;
40947+ } else
40948+ path_exec = "<path too long>";
40949+ }
40950+ }
40951+ if (vma_fault) {
40952+ start = vma_fault->vm_start;
40953+ end = vma_fault->vm_end;
40954+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
40955+ if (vma_fault->vm_file) {
40956+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
40957+ if (IS_ERR(path_fault))
40958+ path_fault = "<path too long>";
40959+ else {
40960+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
40961+ if (path_fault) {
40962+ *path_fault = 0;
40963+ path_fault = buffer_fault;
40964+ } else
40965+ path_fault = "<path too long>";
40966+ }
40967+ } else
40968+ path_fault = "<anonymous mapping>";
40969+ }
40970+ up_read(&mm->mmap_sem);
40971+ }
40972+ if (tsk->signal->curr_ip)
40973+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
40974+ else
40975+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
40976+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
40977+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
40978+ task_uid(tsk), task_euid(tsk), pc, sp);
40979+ free_page((unsigned long)buffer_exec);
40980+ free_page((unsigned long)buffer_fault);
40981+ pax_report_insns(pc, sp);
40982+ do_coredump(SIGKILL, SIGKILL, regs);
40983+}
40984+#endif
40985+
40986+#ifdef CONFIG_PAX_REFCOUNT
40987+void pax_report_refcount_overflow(struct pt_regs *regs)
40988+{
40989+ if (current->signal->curr_ip)
40990+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40991+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
40992+ else
40993+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
40994+ current->comm, task_pid_nr(current), current_uid(), current_euid());
40995+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
40996+ show_regs(regs);
40997+ force_sig_specific(SIGKILL, current);
40998+}
40999+#endif
41000+
41001+#ifdef CONFIG_PAX_USERCOPY
41002+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41003+int object_is_on_stack(const void *obj, unsigned long len)
41004+{
41005+ const void * const stack = task_stack_page(current);
41006+ const void * const stackend = stack + THREAD_SIZE;
41007+
41008+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41009+ const void *frame = NULL;
41010+ const void *oldframe;
41011+#endif
41012+
41013+ if (obj + len < obj)
41014+ return -1;
41015+
41016+ if (obj + len <= stack || stackend <= obj)
41017+ return 0;
41018+
41019+ if (obj < stack || stackend < obj + len)
41020+ return -1;
41021+
41022+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41023+ oldframe = __builtin_frame_address(1);
41024+ if (oldframe)
41025+ frame = __builtin_frame_address(2);
41026+ /*
41027+ low ----------------------------------------------> high
41028+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41029+ ^----------------^
41030+ allow copies only within here
41031+ */
41032+ while (stack <= frame && frame < stackend) {
41033+ /* if obj + len extends past the last frame, this
41034+ check won't pass and the next frame will be 0,
41035+ causing us to bail out and correctly report
41036+ the copy as invalid
41037+ */
41038+ if (obj + len <= frame)
41039+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41040+ oldframe = frame;
41041+ frame = *(const void * const *)frame;
41042+ }
41043+ return -1;
41044+#else
41045+ return 1;
41046+#endif
41047+}
41048+
41049+
41050+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41051+{
41052+ if (current->signal->curr_ip)
41053+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41054+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41055+ else
41056+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41057+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41058+
41059+ dump_stack();
41060+ gr_handle_kernel_exploit();
41061+ do_group_exit(SIGKILL);
41062+}
41063+#endif
41064+
41065+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41066+void pax_track_stack(void)
41067+{
41068+ unsigned long sp = (unsigned long)&sp;
41069+ if (sp < current_thread_info()->lowest_stack &&
41070+ sp > (unsigned long)task_stack_page(current))
41071+ current_thread_info()->lowest_stack = sp;
41072+}
41073+EXPORT_SYMBOL(pax_track_stack);
41074+#endif
41075+
41076 static int zap_process(struct task_struct *start)
41077 {
41078 struct task_struct *t;
41079@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41080 pipe = file->f_path.dentry->d_inode->i_pipe;
41081
41082 pipe_lock(pipe);
41083- pipe->readers++;
41084- pipe->writers--;
41085+ atomic_inc(&pipe->readers);
41086+ atomic_dec(&pipe->writers);
41087
41088- while ((pipe->readers > 1) && (!signal_pending(current))) {
41089+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41090 wake_up_interruptible_sync(&pipe->wait);
41091 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41092 pipe_wait(pipe);
41093 }
41094
41095- pipe->readers--;
41096- pipe->writers++;
41097+ atomic_dec(&pipe->readers);
41098+ atomic_inc(&pipe->writers);
41099 pipe_unlock(pipe);
41100
41101 }
41102@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41103 char **helper_argv = NULL;
41104 int helper_argc = 0;
41105 int dump_count = 0;
41106- static atomic_t core_dump_count = ATOMIC_INIT(0);
41107+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41108
41109 audit_core_dumps(signr);
41110
41111+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41112+ gr_handle_brute_attach(current, mm->flags);
41113+
41114 binfmt = mm->binfmt;
41115 if (!binfmt || !binfmt->core_dump)
41116 goto fail;
41117@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41118 */
41119 clear_thread_flag(TIF_SIGPENDING);
41120
41121+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41122+
41123 /*
41124 * lock_kernel() because format_corename() is controlled by sysctl, which
41125 * uses lock_kernel()
41126@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41127 goto fail_unlock;
41128 }
41129
41130- dump_count = atomic_inc_return(&core_dump_count);
41131+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41132 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41133 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41134 task_tgid_vnr(current), current->comm);
41135@@ -1972,7 +2293,7 @@ close_fail:
41136 filp_close(file, NULL);
41137 fail_dropcount:
41138 if (dump_count)
41139- atomic_dec(&core_dump_count);
41140+ atomic_dec_unchecked(&core_dump_count);
41141 fail_unlock:
41142 if (helper_argv)
41143 argv_free(helper_argv);
41144diff -urNp linux-2.6.32.46/fs/ext2/balloc.c linux-2.6.32.46/fs/ext2/balloc.c
41145--- linux-2.6.32.46/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41146+++ linux-2.6.32.46/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41147@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41148
41149 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41150 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41151- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41152+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41153 sbi->s_resuid != current_fsuid() &&
41154 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41155 return 0;
41156diff -urNp linux-2.6.32.46/fs/ext3/balloc.c linux-2.6.32.46/fs/ext3/balloc.c
41157--- linux-2.6.32.46/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41158+++ linux-2.6.32.46/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41159@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41160
41161 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41162 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41163- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41164+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41165 sbi->s_resuid != current_fsuid() &&
41166 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41167 return 0;
41168diff -urNp linux-2.6.32.46/fs/ext4/balloc.c linux-2.6.32.46/fs/ext4/balloc.c
41169--- linux-2.6.32.46/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41170+++ linux-2.6.32.46/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41171@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41172 /* Hm, nope. Are (enough) root reserved blocks available? */
41173 if (sbi->s_resuid == current_fsuid() ||
41174 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41175- capable(CAP_SYS_RESOURCE)) {
41176+ capable_nolog(CAP_SYS_RESOURCE)) {
41177 if (free_blocks >= (nblocks + dirty_blocks))
41178 return 1;
41179 }
41180diff -urNp linux-2.6.32.46/fs/ext4/ext4.h linux-2.6.32.46/fs/ext4/ext4.h
41181--- linux-2.6.32.46/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41182+++ linux-2.6.32.46/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41183@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41184
41185 /* stats for buddy allocator */
41186 spinlock_t s_mb_pa_lock;
41187- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41188- atomic_t s_bal_success; /* we found long enough chunks */
41189- atomic_t s_bal_allocated; /* in blocks */
41190- atomic_t s_bal_ex_scanned; /* total extents scanned */
41191- atomic_t s_bal_goals; /* goal hits */
41192- atomic_t s_bal_breaks; /* too long searches */
41193- atomic_t s_bal_2orders; /* 2^order hits */
41194+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41195+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41196+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41197+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41198+ atomic_unchecked_t s_bal_goals; /* goal hits */
41199+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41200+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41201 spinlock_t s_bal_lock;
41202 unsigned long s_mb_buddies_generated;
41203 unsigned long long s_mb_generation_time;
41204- atomic_t s_mb_lost_chunks;
41205- atomic_t s_mb_preallocated;
41206- atomic_t s_mb_discarded;
41207+ atomic_unchecked_t s_mb_lost_chunks;
41208+ atomic_unchecked_t s_mb_preallocated;
41209+ atomic_unchecked_t s_mb_discarded;
41210 atomic_t s_lock_busy;
41211
41212 /* locality groups */
41213diff -urNp linux-2.6.32.46/fs/ext4/mballoc.c linux-2.6.32.46/fs/ext4/mballoc.c
41214--- linux-2.6.32.46/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41215+++ linux-2.6.32.46/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41216@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41217 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41218
41219 if (EXT4_SB(sb)->s_mb_stats)
41220- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41221+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41222
41223 break;
41224 }
41225@@ -2131,7 +2131,7 @@ repeat:
41226 ac->ac_status = AC_STATUS_CONTINUE;
41227 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41228 cr = 3;
41229- atomic_inc(&sbi->s_mb_lost_chunks);
41230+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41231 goto repeat;
41232 }
41233 }
41234@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41235 ext4_grpblk_t counters[16];
41236 } sg;
41237
41238+ pax_track_stack();
41239+
41240 group--;
41241 if (group == 0)
41242 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41243@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41244 if (sbi->s_mb_stats) {
41245 printk(KERN_INFO
41246 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41247- atomic_read(&sbi->s_bal_allocated),
41248- atomic_read(&sbi->s_bal_reqs),
41249- atomic_read(&sbi->s_bal_success));
41250+ atomic_read_unchecked(&sbi->s_bal_allocated),
41251+ atomic_read_unchecked(&sbi->s_bal_reqs),
41252+ atomic_read_unchecked(&sbi->s_bal_success));
41253 printk(KERN_INFO
41254 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41255 "%u 2^N hits, %u breaks, %u lost\n",
41256- atomic_read(&sbi->s_bal_ex_scanned),
41257- atomic_read(&sbi->s_bal_goals),
41258- atomic_read(&sbi->s_bal_2orders),
41259- atomic_read(&sbi->s_bal_breaks),
41260- atomic_read(&sbi->s_mb_lost_chunks));
41261+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41262+ atomic_read_unchecked(&sbi->s_bal_goals),
41263+ atomic_read_unchecked(&sbi->s_bal_2orders),
41264+ atomic_read_unchecked(&sbi->s_bal_breaks),
41265+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41266 printk(KERN_INFO
41267 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41268 sbi->s_mb_buddies_generated++,
41269 sbi->s_mb_generation_time);
41270 printk(KERN_INFO
41271 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41272- atomic_read(&sbi->s_mb_preallocated),
41273- atomic_read(&sbi->s_mb_discarded));
41274+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41275+ atomic_read_unchecked(&sbi->s_mb_discarded));
41276 }
41277
41278 free_percpu(sbi->s_locality_groups);
41279@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41280 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41281
41282 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41283- atomic_inc(&sbi->s_bal_reqs);
41284- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41285+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41286+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41287 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41288- atomic_inc(&sbi->s_bal_success);
41289- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41290+ atomic_inc_unchecked(&sbi->s_bal_success);
41291+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41292 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41293 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41294- atomic_inc(&sbi->s_bal_goals);
41295+ atomic_inc_unchecked(&sbi->s_bal_goals);
41296 if (ac->ac_found > sbi->s_mb_max_to_scan)
41297- atomic_inc(&sbi->s_bal_breaks);
41298+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41299 }
41300
41301 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41302@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41303 trace_ext4_mb_new_inode_pa(ac, pa);
41304
41305 ext4_mb_use_inode_pa(ac, pa);
41306- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41307+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41308
41309 ei = EXT4_I(ac->ac_inode);
41310 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41311@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41312 trace_ext4_mb_new_group_pa(ac, pa);
41313
41314 ext4_mb_use_group_pa(ac, pa);
41315- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41316+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41317
41318 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41319 lg = ac->ac_lg;
41320@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41321 * from the bitmap and continue.
41322 */
41323 }
41324- atomic_add(free, &sbi->s_mb_discarded);
41325+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41326
41327 return err;
41328 }
41329@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41330 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41331 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41332 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41333- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41334+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41335
41336 if (ac) {
41337 ac->ac_sb = sb;
41338diff -urNp linux-2.6.32.46/fs/ext4/super.c linux-2.6.32.46/fs/ext4/super.c
41339--- linux-2.6.32.46/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41340+++ linux-2.6.32.46/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41341@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41342 }
41343
41344
41345-static struct sysfs_ops ext4_attr_ops = {
41346+static const struct sysfs_ops ext4_attr_ops = {
41347 .show = ext4_attr_show,
41348 .store = ext4_attr_store,
41349 };
41350diff -urNp linux-2.6.32.46/fs/fcntl.c linux-2.6.32.46/fs/fcntl.c
41351--- linux-2.6.32.46/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41352+++ linux-2.6.32.46/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41353@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41354 if (err)
41355 return err;
41356
41357+ if (gr_handle_chroot_fowner(pid, type))
41358+ return -ENOENT;
41359+ if (gr_check_protected_task_fowner(pid, type))
41360+ return -EACCES;
41361+
41362 f_modown(filp, pid, type, force);
41363 return 0;
41364 }
41365@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41366 switch (cmd) {
41367 case F_DUPFD:
41368 case F_DUPFD_CLOEXEC:
41369+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41370 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41371 break;
41372 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41373diff -urNp linux-2.6.32.46/fs/fifo.c linux-2.6.32.46/fs/fifo.c
41374--- linux-2.6.32.46/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41375+++ linux-2.6.32.46/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41376@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41377 */
41378 filp->f_op = &read_pipefifo_fops;
41379 pipe->r_counter++;
41380- if (pipe->readers++ == 0)
41381+ if (atomic_inc_return(&pipe->readers) == 1)
41382 wake_up_partner(inode);
41383
41384- if (!pipe->writers) {
41385+ if (!atomic_read(&pipe->writers)) {
41386 if ((filp->f_flags & O_NONBLOCK)) {
41387 /* suppress POLLHUP until we have
41388 * seen a writer */
41389@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41390 * errno=ENXIO when there is no process reading the FIFO.
41391 */
41392 ret = -ENXIO;
41393- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41394+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41395 goto err;
41396
41397 filp->f_op = &write_pipefifo_fops;
41398 pipe->w_counter++;
41399- if (!pipe->writers++)
41400+ if (atomic_inc_return(&pipe->writers) == 1)
41401 wake_up_partner(inode);
41402
41403- if (!pipe->readers) {
41404+ if (!atomic_read(&pipe->readers)) {
41405 wait_for_partner(inode, &pipe->r_counter);
41406 if (signal_pending(current))
41407 goto err_wr;
41408@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41409 */
41410 filp->f_op = &rdwr_pipefifo_fops;
41411
41412- pipe->readers++;
41413- pipe->writers++;
41414+ atomic_inc(&pipe->readers);
41415+ atomic_inc(&pipe->writers);
41416 pipe->r_counter++;
41417 pipe->w_counter++;
41418- if (pipe->readers == 1 || pipe->writers == 1)
41419+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41420 wake_up_partner(inode);
41421 break;
41422
41423@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41424 return 0;
41425
41426 err_rd:
41427- if (!--pipe->readers)
41428+ if (atomic_dec_and_test(&pipe->readers))
41429 wake_up_interruptible(&pipe->wait);
41430 ret = -ERESTARTSYS;
41431 goto err;
41432
41433 err_wr:
41434- if (!--pipe->writers)
41435+ if (atomic_dec_and_test(&pipe->writers))
41436 wake_up_interruptible(&pipe->wait);
41437 ret = -ERESTARTSYS;
41438 goto err;
41439
41440 err:
41441- if (!pipe->readers && !pipe->writers)
41442+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41443 free_pipe_info(inode);
41444
41445 err_nocleanup:
41446diff -urNp linux-2.6.32.46/fs/file.c linux-2.6.32.46/fs/file.c
41447--- linux-2.6.32.46/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41448+++ linux-2.6.32.46/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41449@@ -14,6 +14,7 @@
41450 #include <linux/slab.h>
41451 #include <linux/vmalloc.h>
41452 #include <linux/file.h>
41453+#include <linux/security.h>
41454 #include <linux/fdtable.h>
41455 #include <linux/bitops.h>
41456 #include <linux/interrupt.h>
41457@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41458 * N.B. For clone tasks sharing a files structure, this test
41459 * will limit the total number of files that can be opened.
41460 */
41461+
41462+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41463 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41464 return -EMFILE;
41465
41466diff -urNp linux-2.6.32.46/fs/filesystems.c linux-2.6.32.46/fs/filesystems.c
41467--- linux-2.6.32.46/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41468+++ linux-2.6.32.46/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41469@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41470 int len = dot ? dot - name : strlen(name);
41471
41472 fs = __get_fs_type(name, len);
41473+
41474+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41475+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41476+#else
41477 if (!fs && (request_module("%.*s", len, name) == 0))
41478+#endif
41479 fs = __get_fs_type(name, len);
41480
41481 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41482diff -urNp linux-2.6.32.46/fs/fscache/cookie.c linux-2.6.32.46/fs/fscache/cookie.c
41483--- linux-2.6.32.46/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41484+++ linux-2.6.32.46/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41485@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41486 parent ? (char *) parent->def->name : "<no-parent>",
41487 def->name, netfs_data);
41488
41489- fscache_stat(&fscache_n_acquires);
41490+ fscache_stat_unchecked(&fscache_n_acquires);
41491
41492 /* if there's no parent cookie, then we don't create one here either */
41493 if (!parent) {
41494- fscache_stat(&fscache_n_acquires_null);
41495+ fscache_stat_unchecked(&fscache_n_acquires_null);
41496 _leave(" [no parent]");
41497 return NULL;
41498 }
41499@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41500 /* allocate and initialise a cookie */
41501 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41502 if (!cookie) {
41503- fscache_stat(&fscache_n_acquires_oom);
41504+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41505 _leave(" [ENOMEM]");
41506 return NULL;
41507 }
41508@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41509
41510 switch (cookie->def->type) {
41511 case FSCACHE_COOKIE_TYPE_INDEX:
41512- fscache_stat(&fscache_n_cookie_index);
41513+ fscache_stat_unchecked(&fscache_n_cookie_index);
41514 break;
41515 case FSCACHE_COOKIE_TYPE_DATAFILE:
41516- fscache_stat(&fscache_n_cookie_data);
41517+ fscache_stat_unchecked(&fscache_n_cookie_data);
41518 break;
41519 default:
41520- fscache_stat(&fscache_n_cookie_special);
41521+ fscache_stat_unchecked(&fscache_n_cookie_special);
41522 break;
41523 }
41524
41525@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41526 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41527 atomic_dec(&parent->n_children);
41528 __fscache_cookie_put(cookie);
41529- fscache_stat(&fscache_n_acquires_nobufs);
41530+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41531 _leave(" = NULL");
41532 return NULL;
41533 }
41534 }
41535
41536- fscache_stat(&fscache_n_acquires_ok);
41537+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41538 _leave(" = %p", cookie);
41539 return cookie;
41540 }
41541@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41542 cache = fscache_select_cache_for_object(cookie->parent);
41543 if (!cache) {
41544 up_read(&fscache_addremove_sem);
41545- fscache_stat(&fscache_n_acquires_no_cache);
41546+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41547 _leave(" = -ENOMEDIUM [no cache]");
41548 return -ENOMEDIUM;
41549 }
41550@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41551 object = cache->ops->alloc_object(cache, cookie);
41552 fscache_stat_d(&fscache_n_cop_alloc_object);
41553 if (IS_ERR(object)) {
41554- fscache_stat(&fscache_n_object_no_alloc);
41555+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41556 ret = PTR_ERR(object);
41557 goto error;
41558 }
41559
41560- fscache_stat(&fscache_n_object_alloc);
41561+ fscache_stat_unchecked(&fscache_n_object_alloc);
41562
41563 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
41564
41565@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
41566 struct fscache_object *object;
41567 struct hlist_node *_p;
41568
41569- fscache_stat(&fscache_n_updates);
41570+ fscache_stat_unchecked(&fscache_n_updates);
41571
41572 if (!cookie) {
41573- fscache_stat(&fscache_n_updates_null);
41574+ fscache_stat_unchecked(&fscache_n_updates_null);
41575 _leave(" [no cookie]");
41576 return;
41577 }
41578@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
41579 struct fscache_object *object;
41580 unsigned long event;
41581
41582- fscache_stat(&fscache_n_relinquishes);
41583+ fscache_stat_unchecked(&fscache_n_relinquishes);
41584 if (retire)
41585- fscache_stat(&fscache_n_relinquishes_retire);
41586+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
41587
41588 if (!cookie) {
41589- fscache_stat(&fscache_n_relinquishes_null);
41590+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
41591 _leave(" [no cookie]");
41592 return;
41593 }
41594@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
41595
41596 /* wait for the cookie to finish being instantiated (or to fail) */
41597 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
41598- fscache_stat(&fscache_n_relinquishes_waitcrt);
41599+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
41600 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
41601 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
41602 }
41603diff -urNp linux-2.6.32.46/fs/fscache/internal.h linux-2.6.32.46/fs/fscache/internal.h
41604--- linux-2.6.32.46/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
41605+++ linux-2.6.32.46/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
41606@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
41607 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
41608 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
41609
41610-extern atomic_t fscache_n_op_pend;
41611-extern atomic_t fscache_n_op_run;
41612-extern atomic_t fscache_n_op_enqueue;
41613-extern atomic_t fscache_n_op_deferred_release;
41614-extern atomic_t fscache_n_op_release;
41615-extern atomic_t fscache_n_op_gc;
41616-extern atomic_t fscache_n_op_cancelled;
41617-extern atomic_t fscache_n_op_rejected;
41618-
41619-extern atomic_t fscache_n_attr_changed;
41620-extern atomic_t fscache_n_attr_changed_ok;
41621-extern atomic_t fscache_n_attr_changed_nobufs;
41622-extern atomic_t fscache_n_attr_changed_nomem;
41623-extern atomic_t fscache_n_attr_changed_calls;
41624-
41625-extern atomic_t fscache_n_allocs;
41626-extern atomic_t fscache_n_allocs_ok;
41627-extern atomic_t fscache_n_allocs_wait;
41628-extern atomic_t fscache_n_allocs_nobufs;
41629-extern atomic_t fscache_n_allocs_intr;
41630-extern atomic_t fscache_n_allocs_object_dead;
41631-extern atomic_t fscache_n_alloc_ops;
41632-extern atomic_t fscache_n_alloc_op_waits;
41633-
41634-extern atomic_t fscache_n_retrievals;
41635-extern atomic_t fscache_n_retrievals_ok;
41636-extern atomic_t fscache_n_retrievals_wait;
41637-extern atomic_t fscache_n_retrievals_nodata;
41638-extern atomic_t fscache_n_retrievals_nobufs;
41639-extern atomic_t fscache_n_retrievals_intr;
41640-extern atomic_t fscache_n_retrievals_nomem;
41641-extern atomic_t fscache_n_retrievals_object_dead;
41642-extern atomic_t fscache_n_retrieval_ops;
41643-extern atomic_t fscache_n_retrieval_op_waits;
41644-
41645-extern atomic_t fscache_n_stores;
41646-extern atomic_t fscache_n_stores_ok;
41647-extern atomic_t fscache_n_stores_again;
41648-extern atomic_t fscache_n_stores_nobufs;
41649-extern atomic_t fscache_n_stores_oom;
41650-extern atomic_t fscache_n_store_ops;
41651-extern atomic_t fscache_n_store_calls;
41652-extern atomic_t fscache_n_store_pages;
41653-extern atomic_t fscache_n_store_radix_deletes;
41654-extern atomic_t fscache_n_store_pages_over_limit;
41655-
41656-extern atomic_t fscache_n_store_vmscan_not_storing;
41657-extern atomic_t fscache_n_store_vmscan_gone;
41658-extern atomic_t fscache_n_store_vmscan_busy;
41659-extern atomic_t fscache_n_store_vmscan_cancelled;
41660-
41661-extern atomic_t fscache_n_marks;
41662-extern atomic_t fscache_n_uncaches;
41663-
41664-extern atomic_t fscache_n_acquires;
41665-extern atomic_t fscache_n_acquires_null;
41666-extern atomic_t fscache_n_acquires_no_cache;
41667-extern atomic_t fscache_n_acquires_ok;
41668-extern atomic_t fscache_n_acquires_nobufs;
41669-extern atomic_t fscache_n_acquires_oom;
41670-
41671-extern atomic_t fscache_n_updates;
41672-extern atomic_t fscache_n_updates_null;
41673-extern atomic_t fscache_n_updates_run;
41674-
41675-extern atomic_t fscache_n_relinquishes;
41676-extern atomic_t fscache_n_relinquishes_null;
41677-extern atomic_t fscache_n_relinquishes_waitcrt;
41678-extern atomic_t fscache_n_relinquishes_retire;
41679-
41680-extern atomic_t fscache_n_cookie_index;
41681-extern atomic_t fscache_n_cookie_data;
41682-extern atomic_t fscache_n_cookie_special;
41683-
41684-extern atomic_t fscache_n_object_alloc;
41685-extern atomic_t fscache_n_object_no_alloc;
41686-extern atomic_t fscache_n_object_lookups;
41687-extern atomic_t fscache_n_object_lookups_negative;
41688-extern atomic_t fscache_n_object_lookups_positive;
41689-extern atomic_t fscache_n_object_lookups_timed_out;
41690-extern atomic_t fscache_n_object_created;
41691-extern atomic_t fscache_n_object_avail;
41692-extern atomic_t fscache_n_object_dead;
41693-
41694-extern atomic_t fscache_n_checkaux_none;
41695-extern atomic_t fscache_n_checkaux_okay;
41696-extern atomic_t fscache_n_checkaux_update;
41697-extern atomic_t fscache_n_checkaux_obsolete;
41698+extern atomic_unchecked_t fscache_n_op_pend;
41699+extern atomic_unchecked_t fscache_n_op_run;
41700+extern atomic_unchecked_t fscache_n_op_enqueue;
41701+extern atomic_unchecked_t fscache_n_op_deferred_release;
41702+extern atomic_unchecked_t fscache_n_op_release;
41703+extern atomic_unchecked_t fscache_n_op_gc;
41704+extern atomic_unchecked_t fscache_n_op_cancelled;
41705+extern atomic_unchecked_t fscache_n_op_rejected;
41706+
41707+extern atomic_unchecked_t fscache_n_attr_changed;
41708+extern atomic_unchecked_t fscache_n_attr_changed_ok;
41709+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
41710+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
41711+extern atomic_unchecked_t fscache_n_attr_changed_calls;
41712+
41713+extern atomic_unchecked_t fscache_n_allocs;
41714+extern atomic_unchecked_t fscache_n_allocs_ok;
41715+extern atomic_unchecked_t fscache_n_allocs_wait;
41716+extern atomic_unchecked_t fscache_n_allocs_nobufs;
41717+extern atomic_unchecked_t fscache_n_allocs_intr;
41718+extern atomic_unchecked_t fscache_n_allocs_object_dead;
41719+extern atomic_unchecked_t fscache_n_alloc_ops;
41720+extern atomic_unchecked_t fscache_n_alloc_op_waits;
41721+
41722+extern atomic_unchecked_t fscache_n_retrievals;
41723+extern atomic_unchecked_t fscache_n_retrievals_ok;
41724+extern atomic_unchecked_t fscache_n_retrievals_wait;
41725+extern atomic_unchecked_t fscache_n_retrievals_nodata;
41726+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
41727+extern atomic_unchecked_t fscache_n_retrievals_intr;
41728+extern atomic_unchecked_t fscache_n_retrievals_nomem;
41729+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
41730+extern atomic_unchecked_t fscache_n_retrieval_ops;
41731+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
41732+
41733+extern atomic_unchecked_t fscache_n_stores;
41734+extern atomic_unchecked_t fscache_n_stores_ok;
41735+extern atomic_unchecked_t fscache_n_stores_again;
41736+extern atomic_unchecked_t fscache_n_stores_nobufs;
41737+extern atomic_unchecked_t fscache_n_stores_oom;
41738+extern atomic_unchecked_t fscache_n_store_ops;
41739+extern atomic_unchecked_t fscache_n_store_calls;
41740+extern atomic_unchecked_t fscache_n_store_pages;
41741+extern atomic_unchecked_t fscache_n_store_radix_deletes;
41742+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
41743+
41744+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
41745+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
41746+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
41747+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
41748+
41749+extern atomic_unchecked_t fscache_n_marks;
41750+extern atomic_unchecked_t fscache_n_uncaches;
41751+
41752+extern atomic_unchecked_t fscache_n_acquires;
41753+extern atomic_unchecked_t fscache_n_acquires_null;
41754+extern atomic_unchecked_t fscache_n_acquires_no_cache;
41755+extern atomic_unchecked_t fscache_n_acquires_ok;
41756+extern atomic_unchecked_t fscache_n_acquires_nobufs;
41757+extern atomic_unchecked_t fscache_n_acquires_oom;
41758+
41759+extern atomic_unchecked_t fscache_n_updates;
41760+extern atomic_unchecked_t fscache_n_updates_null;
41761+extern atomic_unchecked_t fscache_n_updates_run;
41762+
41763+extern atomic_unchecked_t fscache_n_relinquishes;
41764+extern atomic_unchecked_t fscache_n_relinquishes_null;
41765+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
41766+extern atomic_unchecked_t fscache_n_relinquishes_retire;
41767+
41768+extern atomic_unchecked_t fscache_n_cookie_index;
41769+extern atomic_unchecked_t fscache_n_cookie_data;
41770+extern atomic_unchecked_t fscache_n_cookie_special;
41771+
41772+extern atomic_unchecked_t fscache_n_object_alloc;
41773+extern atomic_unchecked_t fscache_n_object_no_alloc;
41774+extern atomic_unchecked_t fscache_n_object_lookups;
41775+extern atomic_unchecked_t fscache_n_object_lookups_negative;
41776+extern atomic_unchecked_t fscache_n_object_lookups_positive;
41777+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
41778+extern atomic_unchecked_t fscache_n_object_created;
41779+extern atomic_unchecked_t fscache_n_object_avail;
41780+extern atomic_unchecked_t fscache_n_object_dead;
41781+
41782+extern atomic_unchecked_t fscache_n_checkaux_none;
41783+extern atomic_unchecked_t fscache_n_checkaux_okay;
41784+extern atomic_unchecked_t fscache_n_checkaux_update;
41785+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
41786
41787 extern atomic_t fscache_n_cop_alloc_object;
41788 extern atomic_t fscache_n_cop_lookup_object;
41789@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
41790 atomic_inc(stat);
41791 }
41792
41793+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
41794+{
41795+ atomic_inc_unchecked(stat);
41796+}
41797+
41798 static inline void fscache_stat_d(atomic_t *stat)
41799 {
41800 atomic_dec(stat);
41801@@ -259,6 +264,7 @@ extern const struct file_operations fsca
41802
41803 #define __fscache_stat(stat) (NULL)
41804 #define fscache_stat(stat) do {} while (0)
41805+#define fscache_stat_unchecked(stat) do {} while (0)
41806 #define fscache_stat_d(stat) do {} while (0)
41807 #endif
41808
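Note: the internal.h hunks above switch the FS-Cache statistics counters from atomic_t to atomic_unchecked_t and add fscache_stat_unchecked() around atomic_inc_unchecked(). Under PaX's REFCOUNT hardening a plain atomic_t increment is overflow-checked; pure event counters are allowed to wrap, so they use the unchecked variant to avoid false-positive overflow reports. The following is a minimal standalone sketch of that distinction only; the type and helper names are made up here and are not the PaX implementation.

/* Illustrative sketch only; the kernel types are atomic_t / atomic_unchecked_t. */
#include <limits.h>

typedef struct { unsigned int counter; } stat_checked_t;
typedef struct { unsigned int counter; } stat_unchecked_t;

/* Checked increment: refuse to wrap (stand-in for the overflow trap that
 * PaX REFCOUNT raises on atomic_t). */
static inline void stat_inc_checked(stat_checked_t *v)
{
	if (v->counter == UINT_MAX)
		return;			/* real code would report/trap here */
	v->counter++;
}

/* Unchecked increment: pure statistics may wrap silently, so no trap. */
static inline void stat_inc_unchecked(stat_unchecked_t *v)
{
	v->counter++;
}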
41809diff -urNp linux-2.6.32.46/fs/fscache/object.c linux-2.6.32.46/fs/fscache/object.c
41810--- linux-2.6.32.46/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
41811+++ linux-2.6.32.46/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
41812@@ -144,7 +144,7 @@ static void fscache_object_state_machine
41813 /* update the object metadata on disk */
41814 case FSCACHE_OBJECT_UPDATING:
41815 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
41816- fscache_stat(&fscache_n_updates_run);
41817+ fscache_stat_unchecked(&fscache_n_updates_run);
41818 fscache_stat(&fscache_n_cop_update_object);
41819 object->cache->ops->update_object(object);
41820 fscache_stat_d(&fscache_n_cop_update_object);
41821@@ -233,7 +233,7 @@ static void fscache_object_state_machine
41822 spin_lock(&object->lock);
41823 object->state = FSCACHE_OBJECT_DEAD;
41824 spin_unlock(&object->lock);
41825- fscache_stat(&fscache_n_object_dead);
41826+ fscache_stat_unchecked(&fscache_n_object_dead);
41827 goto terminal_transit;
41828
41829 /* handle the parent cache of this object being withdrawn from
41830@@ -248,7 +248,7 @@ static void fscache_object_state_machine
41831 spin_lock(&object->lock);
41832 object->state = FSCACHE_OBJECT_DEAD;
41833 spin_unlock(&object->lock);
41834- fscache_stat(&fscache_n_object_dead);
41835+ fscache_stat_unchecked(&fscache_n_object_dead);
41836 goto terminal_transit;
41837
41838 /* complain about the object being woken up once it is
41839@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
41840 parent->cookie->def->name, cookie->def->name,
41841 object->cache->tag->name);
41842
41843- fscache_stat(&fscache_n_object_lookups);
41844+ fscache_stat_unchecked(&fscache_n_object_lookups);
41845 fscache_stat(&fscache_n_cop_lookup_object);
41846 ret = object->cache->ops->lookup_object(object);
41847 fscache_stat_d(&fscache_n_cop_lookup_object);
41848@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
41849 if (ret == -ETIMEDOUT) {
41850 /* probably stuck behind another object, so move this one to
41851 * the back of the queue */
41852- fscache_stat(&fscache_n_object_lookups_timed_out);
41853+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
41854 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41855 }
41856
41857@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
41858
41859 spin_lock(&object->lock);
41860 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41861- fscache_stat(&fscache_n_object_lookups_negative);
41862+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
41863
41864 /* transit here to allow write requests to begin stacking up
41865 * and read requests to begin returning ENODATA */
41866@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
41867 * result, in which case there may be data available */
41868 spin_lock(&object->lock);
41869 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
41870- fscache_stat(&fscache_n_object_lookups_positive);
41871+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
41872
41873 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
41874
41875@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
41876 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
41877 } else {
41878 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
41879- fscache_stat(&fscache_n_object_created);
41880+ fscache_stat_unchecked(&fscache_n_object_created);
41881
41882 object->state = FSCACHE_OBJECT_AVAILABLE;
41883 spin_unlock(&object->lock);
41884@@ -633,7 +633,7 @@ static void fscache_object_available(str
41885 fscache_enqueue_dependents(object);
41886
41887 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
41888- fscache_stat(&fscache_n_object_avail);
41889+ fscache_stat_unchecked(&fscache_n_object_avail);
41890
41891 _leave("");
41892 }
41893@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
41894 enum fscache_checkaux result;
41895
41896 if (!object->cookie->def->check_aux) {
41897- fscache_stat(&fscache_n_checkaux_none);
41898+ fscache_stat_unchecked(&fscache_n_checkaux_none);
41899 return FSCACHE_CHECKAUX_OKAY;
41900 }
41901
41902@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
41903 switch (result) {
41904 /* entry okay as is */
41905 case FSCACHE_CHECKAUX_OKAY:
41906- fscache_stat(&fscache_n_checkaux_okay);
41907+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
41908 break;
41909
41910 /* entry requires update */
41911 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
41912- fscache_stat(&fscache_n_checkaux_update);
41913+ fscache_stat_unchecked(&fscache_n_checkaux_update);
41914 break;
41915
41916 /* entry requires deletion */
41917 case FSCACHE_CHECKAUX_OBSOLETE:
41918- fscache_stat(&fscache_n_checkaux_obsolete);
41919+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
41920 break;
41921
41922 default:
41923diff -urNp linux-2.6.32.46/fs/fscache/operation.c linux-2.6.32.46/fs/fscache/operation.c
41924--- linux-2.6.32.46/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
41925+++ linux-2.6.32.46/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
41926@@ -16,7 +16,7 @@
41927 #include <linux/seq_file.h>
41928 #include "internal.h"
41929
41930-atomic_t fscache_op_debug_id;
41931+atomic_unchecked_t fscache_op_debug_id;
41932 EXPORT_SYMBOL(fscache_op_debug_id);
41933
41934 /**
41935@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
41936 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
41937 ASSERTCMP(atomic_read(&op->usage), >, 0);
41938
41939- fscache_stat(&fscache_n_op_enqueue);
41940+ fscache_stat_unchecked(&fscache_n_op_enqueue);
41941 switch (op->flags & FSCACHE_OP_TYPE) {
41942 case FSCACHE_OP_FAST:
41943 _debug("queue fast");
41944@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
41945 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
41946 if (op->processor)
41947 fscache_enqueue_operation(op);
41948- fscache_stat(&fscache_n_op_run);
41949+ fscache_stat_unchecked(&fscache_n_op_run);
41950 }
41951
41952 /*
41953@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
41954 if (object->n_ops > 0) {
41955 atomic_inc(&op->usage);
41956 list_add_tail(&op->pend_link, &object->pending_ops);
41957- fscache_stat(&fscache_n_op_pend);
41958+ fscache_stat_unchecked(&fscache_n_op_pend);
41959 } else if (!list_empty(&object->pending_ops)) {
41960 atomic_inc(&op->usage);
41961 list_add_tail(&op->pend_link, &object->pending_ops);
41962- fscache_stat(&fscache_n_op_pend);
41963+ fscache_stat_unchecked(&fscache_n_op_pend);
41964 fscache_start_operations(object);
41965 } else {
41966 ASSERTCMP(object->n_in_progress, ==, 0);
41967@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
41968 object->n_exclusive++; /* reads and writes must wait */
41969 atomic_inc(&op->usage);
41970 list_add_tail(&op->pend_link, &object->pending_ops);
41971- fscache_stat(&fscache_n_op_pend);
41972+ fscache_stat_unchecked(&fscache_n_op_pend);
41973 ret = 0;
41974 } else {
41975 /* not allowed to submit ops in any other state */
41976@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
41977 if (object->n_exclusive > 0) {
41978 atomic_inc(&op->usage);
41979 list_add_tail(&op->pend_link, &object->pending_ops);
41980- fscache_stat(&fscache_n_op_pend);
41981+ fscache_stat_unchecked(&fscache_n_op_pend);
41982 } else if (!list_empty(&object->pending_ops)) {
41983 atomic_inc(&op->usage);
41984 list_add_tail(&op->pend_link, &object->pending_ops);
41985- fscache_stat(&fscache_n_op_pend);
41986+ fscache_stat_unchecked(&fscache_n_op_pend);
41987 fscache_start_operations(object);
41988 } else {
41989 ASSERTCMP(object->n_exclusive, ==, 0);
41990@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
41991 object->n_ops++;
41992 atomic_inc(&op->usage);
41993 list_add_tail(&op->pend_link, &object->pending_ops);
41994- fscache_stat(&fscache_n_op_pend);
41995+ fscache_stat_unchecked(&fscache_n_op_pend);
41996 ret = 0;
41997 } else if (object->state == FSCACHE_OBJECT_DYING ||
41998 object->state == FSCACHE_OBJECT_LC_DYING ||
41999 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42000- fscache_stat(&fscache_n_op_rejected);
42001+ fscache_stat_unchecked(&fscache_n_op_rejected);
42002 ret = -ENOBUFS;
42003 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42004 fscache_report_unexpected_submission(object, op, ostate);
42005@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42006
42007 ret = -EBUSY;
42008 if (!list_empty(&op->pend_link)) {
42009- fscache_stat(&fscache_n_op_cancelled);
42010+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42011 list_del_init(&op->pend_link);
42012 object->n_ops--;
42013 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42014@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42015 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42016 BUG();
42017
42018- fscache_stat(&fscache_n_op_release);
42019+ fscache_stat_unchecked(&fscache_n_op_release);
42020
42021 if (op->release) {
42022 op->release(op);
42023@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42024 * lock, and defer it otherwise */
42025 if (!spin_trylock(&object->lock)) {
42026 _debug("defer put");
42027- fscache_stat(&fscache_n_op_deferred_release);
42028+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42029
42030 cache = object->cache;
42031 spin_lock(&cache->op_gc_list_lock);
42032@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42033
42034 _debug("GC DEFERRED REL OBJ%x OP%x",
42035 object->debug_id, op->debug_id);
42036- fscache_stat(&fscache_n_op_gc);
42037+ fscache_stat_unchecked(&fscache_n_op_gc);
42038
42039 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42040
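Note: fscache_op_debug_id above only hands out debug identifiers, where wraparound is harmless, which is why atomic_inc_return() becomes atomic_inc_return_unchecked(). A tiny sketch of that allocation pattern, with a plain counter standing in for the atomic type (names here are illustrative):

/* Illustrative: wrap-tolerant debug-ID allocation from a plain counter. */
static unsigned int debug_id_counter;	/* stands in for atomic_unchecked_t */

static unsigned int next_debug_id(void)
{
	/* In the patched kernel this is atomic_inc_return_unchecked();
	 * a duplicate ID after 2^32 operations is harmless for debugging. */
	return ++debug_id_counter;
}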
42041diff -urNp linux-2.6.32.46/fs/fscache/page.c linux-2.6.32.46/fs/fscache/page.c
42042--- linux-2.6.32.46/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42043+++ linux-2.6.32.46/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42044@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42045 val = radix_tree_lookup(&cookie->stores, page->index);
42046 if (!val) {
42047 rcu_read_unlock();
42048- fscache_stat(&fscache_n_store_vmscan_not_storing);
42049+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42050 __fscache_uncache_page(cookie, page);
42051 return true;
42052 }
42053@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42054 spin_unlock(&cookie->stores_lock);
42055
42056 if (xpage) {
42057- fscache_stat(&fscache_n_store_vmscan_cancelled);
42058- fscache_stat(&fscache_n_store_radix_deletes);
42059+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42060+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42061 ASSERTCMP(xpage, ==, page);
42062 } else {
42063- fscache_stat(&fscache_n_store_vmscan_gone);
42064+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42065 }
42066
42067 wake_up_bit(&cookie->flags, 0);
42068@@ -106,7 +106,7 @@ page_busy:
42069 /* we might want to wait here, but that could deadlock the allocator as
42070 * the slow-work threads writing to the cache may all end up sleeping
42071 * on memory allocation */
42072- fscache_stat(&fscache_n_store_vmscan_busy);
42073+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42074 return false;
42075 }
42076 EXPORT_SYMBOL(__fscache_maybe_release_page);
42077@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42078 FSCACHE_COOKIE_STORING_TAG);
42079 if (!radix_tree_tag_get(&cookie->stores, page->index,
42080 FSCACHE_COOKIE_PENDING_TAG)) {
42081- fscache_stat(&fscache_n_store_radix_deletes);
42082+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42083 xpage = radix_tree_delete(&cookie->stores, page->index);
42084 }
42085 spin_unlock(&cookie->stores_lock);
42086@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42087
42088 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42089
42090- fscache_stat(&fscache_n_attr_changed_calls);
42091+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42092
42093 if (fscache_object_is_active(object)) {
42094 fscache_set_op_state(op, "CallFS");
42095@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42096
42097 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42098
42099- fscache_stat(&fscache_n_attr_changed);
42100+ fscache_stat_unchecked(&fscache_n_attr_changed);
42101
42102 op = kzalloc(sizeof(*op), GFP_KERNEL);
42103 if (!op) {
42104- fscache_stat(&fscache_n_attr_changed_nomem);
42105+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42106 _leave(" = -ENOMEM");
42107 return -ENOMEM;
42108 }
42109@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42110 if (fscache_submit_exclusive_op(object, op) < 0)
42111 goto nobufs;
42112 spin_unlock(&cookie->lock);
42113- fscache_stat(&fscache_n_attr_changed_ok);
42114+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42115 fscache_put_operation(op);
42116 _leave(" = 0");
42117 return 0;
42118@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42119 nobufs:
42120 spin_unlock(&cookie->lock);
42121 kfree(op);
42122- fscache_stat(&fscache_n_attr_changed_nobufs);
42123+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42124 _leave(" = %d", -ENOBUFS);
42125 return -ENOBUFS;
42126 }
42127@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42128 /* allocate a retrieval operation and attempt to submit it */
42129 op = kzalloc(sizeof(*op), GFP_NOIO);
42130 if (!op) {
42131- fscache_stat(&fscache_n_retrievals_nomem);
42132+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42133 return NULL;
42134 }
42135
42136@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42137 return 0;
42138 }
42139
42140- fscache_stat(&fscache_n_retrievals_wait);
42141+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42142
42143 jif = jiffies;
42144 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42145 fscache_wait_bit_interruptible,
42146 TASK_INTERRUPTIBLE) != 0) {
42147- fscache_stat(&fscache_n_retrievals_intr);
42148+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42149 _leave(" = -ERESTARTSYS");
42150 return -ERESTARTSYS;
42151 }
42152@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42153 */
42154 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42155 struct fscache_retrieval *op,
42156- atomic_t *stat_op_waits,
42157- atomic_t *stat_object_dead)
42158+ atomic_unchecked_t *stat_op_waits,
42159+ atomic_unchecked_t *stat_object_dead)
42160 {
42161 int ret;
42162
42163@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42164 goto check_if_dead;
42165
42166 _debug(">>> WT");
42167- fscache_stat(stat_op_waits);
42168+ fscache_stat_unchecked(stat_op_waits);
42169 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42170 fscache_wait_bit_interruptible,
42171 TASK_INTERRUPTIBLE) < 0) {
42172@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42173
42174 check_if_dead:
42175 if (unlikely(fscache_object_is_dead(object))) {
42176- fscache_stat(stat_object_dead);
42177+ fscache_stat_unchecked(stat_object_dead);
42178 return -ENOBUFS;
42179 }
42180 return 0;
42181@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42182
42183 _enter("%p,%p,,,", cookie, page);
42184
42185- fscache_stat(&fscache_n_retrievals);
42186+ fscache_stat_unchecked(&fscache_n_retrievals);
42187
42188 if (hlist_empty(&cookie->backing_objects))
42189 goto nobufs;
42190@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42191 goto nobufs_unlock;
42192 spin_unlock(&cookie->lock);
42193
42194- fscache_stat(&fscache_n_retrieval_ops);
42195+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42196
42197 /* pin the netfs read context in case we need to do the actual netfs
42198 * read because we've encountered a cache read failure */
42199@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42200
42201 error:
42202 if (ret == -ENOMEM)
42203- fscache_stat(&fscache_n_retrievals_nomem);
42204+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42205 else if (ret == -ERESTARTSYS)
42206- fscache_stat(&fscache_n_retrievals_intr);
42207+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42208 else if (ret == -ENODATA)
42209- fscache_stat(&fscache_n_retrievals_nodata);
42210+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42211 else if (ret < 0)
42212- fscache_stat(&fscache_n_retrievals_nobufs);
42213+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42214 else
42215- fscache_stat(&fscache_n_retrievals_ok);
42216+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42217
42218 fscache_put_retrieval(op);
42219 _leave(" = %d", ret);
42220@@ -453,7 +453,7 @@ nobufs_unlock:
42221 spin_unlock(&cookie->lock);
42222 kfree(op);
42223 nobufs:
42224- fscache_stat(&fscache_n_retrievals_nobufs);
42225+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42226 _leave(" = -ENOBUFS");
42227 return -ENOBUFS;
42228 }
42229@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42230
42231 _enter("%p,,%d,,,", cookie, *nr_pages);
42232
42233- fscache_stat(&fscache_n_retrievals);
42234+ fscache_stat_unchecked(&fscache_n_retrievals);
42235
42236 if (hlist_empty(&cookie->backing_objects))
42237 goto nobufs;
42238@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42239 goto nobufs_unlock;
42240 spin_unlock(&cookie->lock);
42241
42242- fscache_stat(&fscache_n_retrieval_ops);
42243+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42244
42245 /* pin the netfs read context in case we need to do the actual netfs
42246 * read because we've encountered a cache read failure */
42247@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42248
42249 error:
42250 if (ret == -ENOMEM)
42251- fscache_stat(&fscache_n_retrievals_nomem);
42252+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42253 else if (ret == -ERESTARTSYS)
42254- fscache_stat(&fscache_n_retrievals_intr);
42255+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42256 else if (ret == -ENODATA)
42257- fscache_stat(&fscache_n_retrievals_nodata);
42258+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42259 else if (ret < 0)
42260- fscache_stat(&fscache_n_retrievals_nobufs);
42261+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42262 else
42263- fscache_stat(&fscache_n_retrievals_ok);
42264+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42265
42266 fscache_put_retrieval(op);
42267 _leave(" = %d", ret);
42268@@ -570,7 +570,7 @@ nobufs_unlock:
42269 spin_unlock(&cookie->lock);
42270 kfree(op);
42271 nobufs:
42272- fscache_stat(&fscache_n_retrievals_nobufs);
42273+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42274 _leave(" = -ENOBUFS");
42275 return -ENOBUFS;
42276 }
42277@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42278
42279 _enter("%p,%p,,,", cookie, page);
42280
42281- fscache_stat(&fscache_n_allocs);
42282+ fscache_stat_unchecked(&fscache_n_allocs);
42283
42284 if (hlist_empty(&cookie->backing_objects))
42285 goto nobufs;
42286@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42287 goto nobufs_unlock;
42288 spin_unlock(&cookie->lock);
42289
42290- fscache_stat(&fscache_n_alloc_ops);
42291+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42292
42293 ret = fscache_wait_for_retrieval_activation(
42294 object, op,
42295@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42296
42297 error:
42298 if (ret == -ERESTARTSYS)
42299- fscache_stat(&fscache_n_allocs_intr);
42300+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42301 else if (ret < 0)
42302- fscache_stat(&fscache_n_allocs_nobufs);
42303+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42304 else
42305- fscache_stat(&fscache_n_allocs_ok);
42306+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42307
42308 fscache_put_retrieval(op);
42309 _leave(" = %d", ret);
42310@@ -651,7 +651,7 @@ nobufs_unlock:
42311 spin_unlock(&cookie->lock);
42312 kfree(op);
42313 nobufs:
42314- fscache_stat(&fscache_n_allocs_nobufs);
42315+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42316 _leave(" = -ENOBUFS");
42317 return -ENOBUFS;
42318 }
42319@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42320
42321 spin_lock(&cookie->stores_lock);
42322
42323- fscache_stat(&fscache_n_store_calls);
42324+ fscache_stat_unchecked(&fscache_n_store_calls);
42325
42326 /* find a page to store */
42327 page = NULL;
42328@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42329 page = results[0];
42330 _debug("gang %d [%lx]", n, page->index);
42331 if (page->index > op->store_limit) {
42332- fscache_stat(&fscache_n_store_pages_over_limit);
42333+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42334 goto superseded;
42335 }
42336
42337@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42338
42339 if (page) {
42340 fscache_set_op_state(&op->op, "Store");
42341- fscache_stat(&fscache_n_store_pages);
42342+ fscache_stat_unchecked(&fscache_n_store_pages);
42343 fscache_stat(&fscache_n_cop_write_page);
42344 ret = object->cache->ops->write_page(op, page);
42345 fscache_stat_d(&fscache_n_cop_write_page);
42346@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42347 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42348 ASSERT(PageFsCache(page));
42349
42350- fscache_stat(&fscache_n_stores);
42351+ fscache_stat_unchecked(&fscache_n_stores);
42352
42353 op = kzalloc(sizeof(*op), GFP_NOIO);
42354 if (!op)
42355@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42356 spin_unlock(&cookie->stores_lock);
42357 spin_unlock(&object->lock);
42358
42359- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42360+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42361 op->store_limit = object->store_limit;
42362
42363 if (fscache_submit_op(object, &op->op) < 0)
42364@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42365
42366 spin_unlock(&cookie->lock);
42367 radix_tree_preload_end();
42368- fscache_stat(&fscache_n_store_ops);
42369- fscache_stat(&fscache_n_stores_ok);
42370+ fscache_stat_unchecked(&fscache_n_store_ops);
42371+ fscache_stat_unchecked(&fscache_n_stores_ok);
42372
42373 /* the slow work queue now carries its own ref on the object */
42374 fscache_put_operation(&op->op);
42375@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42376 return 0;
42377
42378 already_queued:
42379- fscache_stat(&fscache_n_stores_again);
42380+ fscache_stat_unchecked(&fscache_n_stores_again);
42381 already_pending:
42382 spin_unlock(&cookie->stores_lock);
42383 spin_unlock(&object->lock);
42384 spin_unlock(&cookie->lock);
42385 radix_tree_preload_end();
42386 kfree(op);
42387- fscache_stat(&fscache_n_stores_ok);
42388+ fscache_stat_unchecked(&fscache_n_stores_ok);
42389 _leave(" = 0");
42390 return 0;
42391
42392@@ -886,14 +886,14 @@ nobufs:
42393 spin_unlock(&cookie->lock);
42394 radix_tree_preload_end();
42395 kfree(op);
42396- fscache_stat(&fscache_n_stores_nobufs);
42397+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42398 _leave(" = -ENOBUFS");
42399 return -ENOBUFS;
42400
42401 nomem_free:
42402 kfree(op);
42403 nomem:
42404- fscache_stat(&fscache_n_stores_oom);
42405+ fscache_stat_unchecked(&fscache_n_stores_oom);
42406 _leave(" = -ENOMEM");
42407 return -ENOMEM;
42408 }
42409@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42410 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42411 ASSERTCMP(page, !=, NULL);
42412
42413- fscache_stat(&fscache_n_uncaches);
42414+ fscache_stat_unchecked(&fscache_n_uncaches);
42415
42416 /* cache withdrawal may beat us to it */
42417 if (!PageFsCache(page))
42418@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42419 unsigned long loop;
42420
42421 #ifdef CONFIG_FSCACHE_STATS
42422- atomic_add(pagevec->nr, &fscache_n_marks);
42423+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42424 #endif
42425
42426 for (loop = 0; loop < pagevec->nr; loop++) {
42427diff -urNp linux-2.6.32.46/fs/fscache/stats.c linux-2.6.32.46/fs/fscache/stats.c
42428--- linux-2.6.32.46/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42429+++ linux-2.6.32.46/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42430@@ -18,95 +18,95 @@
42431 /*
42432 * operation counters
42433 */
42434-atomic_t fscache_n_op_pend;
42435-atomic_t fscache_n_op_run;
42436-atomic_t fscache_n_op_enqueue;
42437-atomic_t fscache_n_op_requeue;
42438-atomic_t fscache_n_op_deferred_release;
42439-atomic_t fscache_n_op_release;
42440-atomic_t fscache_n_op_gc;
42441-atomic_t fscache_n_op_cancelled;
42442-atomic_t fscache_n_op_rejected;
42443-
42444-atomic_t fscache_n_attr_changed;
42445-atomic_t fscache_n_attr_changed_ok;
42446-atomic_t fscache_n_attr_changed_nobufs;
42447-atomic_t fscache_n_attr_changed_nomem;
42448-atomic_t fscache_n_attr_changed_calls;
42449-
42450-atomic_t fscache_n_allocs;
42451-atomic_t fscache_n_allocs_ok;
42452-atomic_t fscache_n_allocs_wait;
42453-atomic_t fscache_n_allocs_nobufs;
42454-atomic_t fscache_n_allocs_intr;
42455-atomic_t fscache_n_allocs_object_dead;
42456-atomic_t fscache_n_alloc_ops;
42457-atomic_t fscache_n_alloc_op_waits;
42458-
42459-atomic_t fscache_n_retrievals;
42460-atomic_t fscache_n_retrievals_ok;
42461-atomic_t fscache_n_retrievals_wait;
42462-atomic_t fscache_n_retrievals_nodata;
42463-atomic_t fscache_n_retrievals_nobufs;
42464-atomic_t fscache_n_retrievals_intr;
42465-atomic_t fscache_n_retrievals_nomem;
42466-atomic_t fscache_n_retrievals_object_dead;
42467-atomic_t fscache_n_retrieval_ops;
42468-atomic_t fscache_n_retrieval_op_waits;
42469-
42470-atomic_t fscache_n_stores;
42471-atomic_t fscache_n_stores_ok;
42472-atomic_t fscache_n_stores_again;
42473-atomic_t fscache_n_stores_nobufs;
42474-atomic_t fscache_n_stores_oom;
42475-atomic_t fscache_n_store_ops;
42476-atomic_t fscache_n_store_calls;
42477-atomic_t fscache_n_store_pages;
42478-atomic_t fscache_n_store_radix_deletes;
42479-atomic_t fscache_n_store_pages_over_limit;
42480-
42481-atomic_t fscache_n_store_vmscan_not_storing;
42482-atomic_t fscache_n_store_vmscan_gone;
42483-atomic_t fscache_n_store_vmscan_busy;
42484-atomic_t fscache_n_store_vmscan_cancelled;
42485-
42486-atomic_t fscache_n_marks;
42487-atomic_t fscache_n_uncaches;
42488-
42489-atomic_t fscache_n_acquires;
42490-atomic_t fscache_n_acquires_null;
42491-atomic_t fscache_n_acquires_no_cache;
42492-atomic_t fscache_n_acquires_ok;
42493-atomic_t fscache_n_acquires_nobufs;
42494-atomic_t fscache_n_acquires_oom;
42495-
42496-atomic_t fscache_n_updates;
42497-atomic_t fscache_n_updates_null;
42498-atomic_t fscache_n_updates_run;
42499-
42500-atomic_t fscache_n_relinquishes;
42501-atomic_t fscache_n_relinquishes_null;
42502-atomic_t fscache_n_relinquishes_waitcrt;
42503-atomic_t fscache_n_relinquishes_retire;
42504-
42505-atomic_t fscache_n_cookie_index;
42506-atomic_t fscache_n_cookie_data;
42507-atomic_t fscache_n_cookie_special;
42508-
42509-atomic_t fscache_n_object_alloc;
42510-atomic_t fscache_n_object_no_alloc;
42511-atomic_t fscache_n_object_lookups;
42512-atomic_t fscache_n_object_lookups_negative;
42513-atomic_t fscache_n_object_lookups_positive;
42514-atomic_t fscache_n_object_lookups_timed_out;
42515-atomic_t fscache_n_object_created;
42516-atomic_t fscache_n_object_avail;
42517-atomic_t fscache_n_object_dead;
42518-
42519-atomic_t fscache_n_checkaux_none;
42520-atomic_t fscache_n_checkaux_okay;
42521-atomic_t fscache_n_checkaux_update;
42522-atomic_t fscache_n_checkaux_obsolete;
42523+atomic_unchecked_t fscache_n_op_pend;
42524+atomic_unchecked_t fscache_n_op_run;
42525+atomic_unchecked_t fscache_n_op_enqueue;
42526+atomic_unchecked_t fscache_n_op_requeue;
42527+atomic_unchecked_t fscache_n_op_deferred_release;
42528+atomic_unchecked_t fscache_n_op_release;
42529+atomic_unchecked_t fscache_n_op_gc;
42530+atomic_unchecked_t fscache_n_op_cancelled;
42531+atomic_unchecked_t fscache_n_op_rejected;
42532+
42533+atomic_unchecked_t fscache_n_attr_changed;
42534+atomic_unchecked_t fscache_n_attr_changed_ok;
42535+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42536+atomic_unchecked_t fscache_n_attr_changed_nomem;
42537+atomic_unchecked_t fscache_n_attr_changed_calls;
42538+
42539+atomic_unchecked_t fscache_n_allocs;
42540+atomic_unchecked_t fscache_n_allocs_ok;
42541+atomic_unchecked_t fscache_n_allocs_wait;
42542+atomic_unchecked_t fscache_n_allocs_nobufs;
42543+atomic_unchecked_t fscache_n_allocs_intr;
42544+atomic_unchecked_t fscache_n_allocs_object_dead;
42545+atomic_unchecked_t fscache_n_alloc_ops;
42546+atomic_unchecked_t fscache_n_alloc_op_waits;
42547+
42548+atomic_unchecked_t fscache_n_retrievals;
42549+atomic_unchecked_t fscache_n_retrievals_ok;
42550+atomic_unchecked_t fscache_n_retrievals_wait;
42551+atomic_unchecked_t fscache_n_retrievals_nodata;
42552+atomic_unchecked_t fscache_n_retrievals_nobufs;
42553+atomic_unchecked_t fscache_n_retrievals_intr;
42554+atomic_unchecked_t fscache_n_retrievals_nomem;
42555+atomic_unchecked_t fscache_n_retrievals_object_dead;
42556+atomic_unchecked_t fscache_n_retrieval_ops;
42557+atomic_unchecked_t fscache_n_retrieval_op_waits;
42558+
42559+atomic_unchecked_t fscache_n_stores;
42560+atomic_unchecked_t fscache_n_stores_ok;
42561+atomic_unchecked_t fscache_n_stores_again;
42562+atomic_unchecked_t fscache_n_stores_nobufs;
42563+atomic_unchecked_t fscache_n_stores_oom;
42564+atomic_unchecked_t fscache_n_store_ops;
42565+atomic_unchecked_t fscache_n_store_calls;
42566+atomic_unchecked_t fscache_n_store_pages;
42567+atomic_unchecked_t fscache_n_store_radix_deletes;
42568+atomic_unchecked_t fscache_n_store_pages_over_limit;
42569+
42570+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42571+atomic_unchecked_t fscache_n_store_vmscan_gone;
42572+atomic_unchecked_t fscache_n_store_vmscan_busy;
42573+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42574+
42575+atomic_unchecked_t fscache_n_marks;
42576+atomic_unchecked_t fscache_n_uncaches;
42577+
42578+atomic_unchecked_t fscache_n_acquires;
42579+atomic_unchecked_t fscache_n_acquires_null;
42580+atomic_unchecked_t fscache_n_acquires_no_cache;
42581+atomic_unchecked_t fscache_n_acquires_ok;
42582+atomic_unchecked_t fscache_n_acquires_nobufs;
42583+atomic_unchecked_t fscache_n_acquires_oom;
42584+
42585+atomic_unchecked_t fscache_n_updates;
42586+atomic_unchecked_t fscache_n_updates_null;
42587+atomic_unchecked_t fscache_n_updates_run;
42588+
42589+atomic_unchecked_t fscache_n_relinquishes;
42590+atomic_unchecked_t fscache_n_relinquishes_null;
42591+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42592+atomic_unchecked_t fscache_n_relinquishes_retire;
42593+
42594+atomic_unchecked_t fscache_n_cookie_index;
42595+atomic_unchecked_t fscache_n_cookie_data;
42596+atomic_unchecked_t fscache_n_cookie_special;
42597+
42598+atomic_unchecked_t fscache_n_object_alloc;
42599+atomic_unchecked_t fscache_n_object_no_alloc;
42600+atomic_unchecked_t fscache_n_object_lookups;
42601+atomic_unchecked_t fscache_n_object_lookups_negative;
42602+atomic_unchecked_t fscache_n_object_lookups_positive;
42603+atomic_unchecked_t fscache_n_object_lookups_timed_out;
42604+atomic_unchecked_t fscache_n_object_created;
42605+atomic_unchecked_t fscache_n_object_avail;
42606+atomic_unchecked_t fscache_n_object_dead;
42607+
42608+atomic_unchecked_t fscache_n_checkaux_none;
42609+atomic_unchecked_t fscache_n_checkaux_okay;
42610+atomic_unchecked_t fscache_n_checkaux_update;
42611+atomic_unchecked_t fscache_n_checkaux_obsolete;
42612
42613 atomic_t fscache_n_cop_alloc_object;
42614 atomic_t fscache_n_cop_lookup_object;
42615@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
42616 seq_puts(m, "FS-Cache statistics\n");
42617
42618 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
42619- atomic_read(&fscache_n_cookie_index),
42620- atomic_read(&fscache_n_cookie_data),
42621- atomic_read(&fscache_n_cookie_special));
42622+ atomic_read_unchecked(&fscache_n_cookie_index),
42623+ atomic_read_unchecked(&fscache_n_cookie_data),
42624+ atomic_read_unchecked(&fscache_n_cookie_special));
42625
42626 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
42627- atomic_read(&fscache_n_object_alloc),
42628- atomic_read(&fscache_n_object_no_alloc),
42629- atomic_read(&fscache_n_object_avail),
42630- atomic_read(&fscache_n_object_dead));
42631+ atomic_read_unchecked(&fscache_n_object_alloc),
42632+ atomic_read_unchecked(&fscache_n_object_no_alloc),
42633+ atomic_read_unchecked(&fscache_n_object_avail),
42634+ atomic_read_unchecked(&fscache_n_object_dead));
42635 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
42636- atomic_read(&fscache_n_checkaux_none),
42637- atomic_read(&fscache_n_checkaux_okay),
42638- atomic_read(&fscache_n_checkaux_update),
42639- atomic_read(&fscache_n_checkaux_obsolete));
42640+ atomic_read_unchecked(&fscache_n_checkaux_none),
42641+ atomic_read_unchecked(&fscache_n_checkaux_okay),
42642+ atomic_read_unchecked(&fscache_n_checkaux_update),
42643+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
42644
42645 seq_printf(m, "Pages : mrk=%u unc=%u\n",
42646- atomic_read(&fscache_n_marks),
42647- atomic_read(&fscache_n_uncaches));
42648+ atomic_read_unchecked(&fscache_n_marks),
42649+ atomic_read_unchecked(&fscache_n_uncaches));
42650
42651 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
42652 " oom=%u\n",
42653- atomic_read(&fscache_n_acquires),
42654- atomic_read(&fscache_n_acquires_null),
42655- atomic_read(&fscache_n_acquires_no_cache),
42656- atomic_read(&fscache_n_acquires_ok),
42657- atomic_read(&fscache_n_acquires_nobufs),
42658- atomic_read(&fscache_n_acquires_oom));
42659+ atomic_read_unchecked(&fscache_n_acquires),
42660+ atomic_read_unchecked(&fscache_n_acquires_null),
42661+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
42662+ atomic_read_unchecked(&fscache_n_acquires_ok),
42663+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
42664+ atomic_read_unchecked(&fscache_n_acquires_oom));
42665
42666 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
42667- atomic_read(&fscache_n_object_lookups),
42668- atomic_read(&fscache_n_object_lookups_negative),
42669- atomic_read(&fscache_n_object_lookups_positive),
42670- atomic_read(&fscache_n_object_lookups_timed_out),
42671- atomic_read(&fscache_n_object_created));
42672+ atomic_read_unchecked(&fscache_n_object_lookups),
42673+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
42674+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
42675+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
42676+ atomic_read_unchecked(&fscache_n_object_created));
42677
42678 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
42679- atomic_read(&fscache_n_updates),
42680- atomic_read(&fscache_n_updates_null),
42681- atomic_read(&fscache_n_updates_run));
42682+ atomic_read_unchecked(&fscache_n_updates),
42683+ atomic_read_unchecked(&fscache_n_updates_null),
42684+ atomic_read_unchecked(&fscache_n_updates_run));
42685
42686 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
42687- atomic_read(&fscache_n_relinquishes),
42688- atomic_read(&fscache_n_relinquishes_null),
42689- atomic_read(&fscache_n_relinquishes_waitcrt),
42690- atomic_read(&fscache_n_relinquishes_retire));
42691+ atomic_read_unchecked(&fscache_n_relinquishes),
42692+ atomic_read_unchecked(&fscache_n_relinquishes_null),
42693+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
42694+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
42695
42696 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
42697- atomic_read(&fscache_n_attr_changed),
42698- atomic_read(&fscache_n_attr_changed_ok),
42699- atomic_read(&fscache_n_attr_changed_nobufs),
42700- atomic_read(&fscache_n_attr_changed_nomem),
42701- atomic_read(&fscache_n_attr_changed_calls));
42702+ atomic_read_unchecked(&fscache_n_attr_changed),
42703+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
42704+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
42705+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
42706+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
42707
42708 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
42709- atomic_read(&fscache_n_allocs),
42710- atomic_read(&fscache_n_allocs_ok),
42711- atomic_read(&fscache_n_allocs_wait),
42712- atomic_read(&fscache_n_allocs_nobufs),
42713- atomic_read(&fscache_n_allocs_intr));
42714+ atomic_read_unchecked(&fscache_n_allocs),
42715+ atomic_read_unchecked(&fscache_n_allocs_ok),
42716+ atomic_read_unchecked(&fscache_n_allocs_wait),
42717+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
42718+ atomic_read_unchecked(&fscache_n_allocs_intr));
42719 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
42720- atomic_read(&fscache_n_alloc_ops),
42721- atomic_read(&fscache_n_alloc_op_waits),
42722- atomic_read(&fscache_n_allocs_object_dead));
42723+ atomic_read_unchecked(&fscache_n_alloc_ops),
42724+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
42725+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
42726
42727 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
42728 " int=%u oom=%u\n",
42729- atomic_read(&fscache_n_retrievals),
42730- atomic_read(&fscache_n_retrievals_ok),
42731- atomic_read(&fscache_n_retrievals_wait),
42732- atomic_read(&fscache_n_retrievals_nodata),
42733- atomic_read(&fscache_n_retrievals_nobufs),
42734- atomic_read(&fscache_n_retrievals_intr),
42735- atomic_read(&fscache_n_retrievals_nomem));
42736+ atomic_read_unchecked(&fscache_n_retrievals),
42737+ atomic_read_unchecked(&fscache_n_retrievals_ok),
42738+ atomic_read_unchecked(&fscache_n_retrievals_wait),
42739+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
42740+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
42741+ atomic_read_unchecked(&fscache_n_retrievals_intr),
42742+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
42743 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
42744- atomic_read(&fscache_n_retrieval_ops),
42745- atomic_read(&fscache_n_retrieval_op_waits),
42746- atomic_read(&fscache_n_retrievals_object_dead));
42747+ atomic_read_unchecked(&fscache_n_retrieval_ops),
42748+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
42749+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
42750
42751 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
42752- atomic_read(&fscache_n_stores),
42753- atomic_read(&fscache_n_stores_ok),
42754- atomic_read(&fscache_n_stores_again),
42755- atomic_read(&fscache_n_stores_nobufs),
42756- atomic_read(&fscache_n_stores_oom));
42757+ atomic_read_unchecked(&fscache_n_stores),
42758+ atomic_read_unchecked(&fscache_n_stores_ok),
42759+ atomic_read_unchecked(&fscache_n_stores_again),
42760+ atomic_read_unchecked(&fscache_n_stores_nobufs),
42761+ atomic_read_unchecked(&fscache_n_stores_oom));
42762 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
42763- atomic_read(&fscache_n_store_ops),
42764- atomic_read(&fscache_n_store_calls),
42765- atomic_read(&fscache_n_store_pages),
42766- atomic_read(&fscache_n_store_radix_deletes),
42767- atomic_read(&fscache_n_store_pages_over_limit));
42768+ atomic_read_unchecked(&fscache_n_store_ops),
42769+ atomic_read_unchecked(&fscache_n_store_calls),
42770+ atomic_read_unchecked(&fscache_n_store_pages),
42771+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
42772+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
42773
42774 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
42775- atomic_read(&fscache_n_store_vmscan_not_storing),
42776- atomic_read(&fscache_n_store_vmscan_gone),
42777- atomic_read(&fscache_n_store_vmscan_busy),
42778- atomic_read(&fscache_n_store_vmscan_cancelled));
42779+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
42780+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
42781+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
42782+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
42783
42784 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
42785- atomic_read(&fscache_n_op_pend),
42786- atomic_read(&fscache_n_op_run),
42787- atomic_read(&fscache_n_op_enqueue),
42788- atomic_read(&fscache_n_op_cancelled),
42789- atomic_read(&fscache_n_op_rejected));
42790+ atomic_read_unchecked(&fscache_n_op_pend),
42791+ atomic_read_unchecked(&fscache_n_op_run),
42792+ atomic_read_unchecked(&fscache_n_op_enqueue),
42793+ atomic_read_unchecked(&fscache_n_op_cancelled),
42794+ atomic_read_unchecked(&fscache_n_op_rejected));
42795 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
42796- atomic_read(&fscache_n_op_deferred_release),
42797- atomic_read(&fscache_n_op_release),
42798- atomic_read(&fscache_n_op_gc));
42799+ atomic_read_unchecked(&fscache_n_op_deferred_release),
42800+ atomic_read_unchecked(&fscache_n_op_release),
42801+ atomic_read_unchecked(&fscache_n_op_gc));
42802
42803 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
42804 atomic_read(&fscache_n_cop_alloc_object),
42805diff -urNp linux-2.6.32.46/fs/fs_struct.c linux-2.6.32.46/fs/fs_struct.c
42806--- linux-2.6.32.46/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
42807+++ linux-2.6.32.46/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
42808@@ -4,6 +4,7 @@
42809 #include <linux/path.h>
42810 #include <linux/slab.h>
42811 #include <linux/fs_struct.h>
42812+#include <linux/grsecurity.h>
42813
42814 /*
42815 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
42816@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
42817 old_root = fs->root;
42818 fs->root = *path;
42819 path_get(path);
42820+ gr_set_chroot_entries(current, path);
42821 write_unlock(&fs->lock);
42822 if (old_root.dentry)
42823 path_put(&old_root);
42824@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
42825 && fs->root.mnt == old_root->mnt) {
42826 path_get(new_root);
42827 fs->root = *new_root;
42828+ gr_set_chroot_entries(p, new_root);
42829 count++;
42830 }
42831 if (fs->pwd.dentry == old_root->dentry
42832@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
42833 task_lock(tsk);
42834 write_lock(&fs->lock);
42835 tsk->fs = NULL;
42836- kill = !--fs->users;
42837+ gr_clear_chroot_entries(tsk);
42838+ kill = !atomic_dec_return(&fs->users);
42839 write_unlock(&fs->lock);
42840 task_unlock(tsk);
42841 if (kill)
42842@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
42843 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42844 /* We don't need to lock fs - think why ;-) */
42845 if (fs) {
42846- fs->users = 1;
42847+ atomic_set(&fs->users, 1);
42848 fs->in_exec = 0;
42849 rwlock_init(&fs->lock);
42850 fs->umask = old->umask;
42851@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
42852
42853 task_lock(current);
42854 write_lock(&fs->lock);
42855- kill = !--fs->users;
42856+ kill = !atomic_dec_return(&fs->users);
42857 current->fs = new_fs;
42858+ gr_set_chroot_entries(current, &new_fs->root);
42859 write_unlock(&fs->lock);
42860 task_unlock(current);
42861
42862@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
42863
42864 /* to be mentioned only in INIT_TASK */
42865 struct fs_struct init_fs = {
42866- .users = 1,
42867+ .users = ATOMIC_INIT(1),
42868 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
42869 .umask = 0022,
42870 };
42871@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
42872 task_lock(current);
42873
42874 write_lock(&init_fs.lock);
42875- init_fs.users++;
42876+ atomic_inc(&init_fs.users);
42877 write_unlock(&init_fs.lock);
42878
42879 write_lock(&fs->lock);
42880 current->fs = &init_fs;
42881- kill = !--fs->users;
42882+ gr_set_chroot_entries(current, &current->fs->root);
42883+ kill = !atomic_dec_return(&fs->users);
42884 write_unlock(&fs->lock);
42885
42886 task_unlock(current);
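Note: the fs_struct.c hunks convert fs->users from a plain integer guarded by fs->lock into an atomic_t, so every `kill = !--fs->users` becomes `kill = !atomic_dec_return(&fs->users)`, and chroot bookkeeping (gr_set/clear_chroot_entries) is added at the same points. A self-contained sketch of the drop-last-reference pattern using C11 atomics; the struct and helper names are hypothetical, not the kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct fs_struct_sketch {
	atomic_int users;		/* stands in for atomic_t users */
};

/* Drop one reference; free only when the new count reaches zero, mirroring
 * kill = !atomic_dec_return(&fs->users) in the patch. */
static void put_fs_struct_sketch(struct fs_struct_sketch *fs)
{
	bool kill = (atomic_fetch_sub(&fs->users, 1) - 1 == 0);

	if (kill)
		free(fs);
}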
42887diff -urNp linux-2.6.32.46/fs/fuse/cuse.c linux-2.6.32.46/fs/fuse/cuse.c
42888--- linux-2.6.32.46/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
42889+++ linux-2.6.32.46/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
42890@@ -576,10 +576,12 @@ static int __init cuse_init(void)
42891 INIT_LIST_HEAD(&cuse_conntbl[i]);
42892
42893 /* inherit and extend fuse_dev_operations */
42894- cuse_channel_fops = fuse_dev_operations;
42895- cuse_channel_fops.owner = THIS_MODULE;
42896- cuse_channel_fops.open = cuse_channel_open;
42897- cuse_channel_fops.release = cuse_channel_release;
42898+ pax_open_kernel();
42899+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
42900+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
42901+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
42902+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
42903+ pax_close_kernel();
42904
42905 cuse_class = class_create(THIS_MODULE, "cuse");
42906 if (IS_ERR(cuse_class))
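Note: cuse_init() used to copy fuse_dev_operations into a writable cuse_channel_fops and then patch individual members. With grsecurity's constification, ops structures like this are read-only at runtime, so the hunk wraps the writes in pax_open_kernel()/pax_close_kernel() and assigns through explicit pointer casts. A rough userspace analogue of "briefly make a read-only table writable, patch it, re-protect it" using mprotect; this is illustrative only, the in-kernel mechanism is different and all names below are made up:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops_sketch {
	int (*open)(void);
};

static int base_open(void)        { return 0; }
static int cuse_open_sketch(void) { return 1; }

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	/* Keep the ops table in its own page so it can be re-protected. */
	struct ops_sketch *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;

	ops->open = base_open;
	mprotect(ops, pagesz, PROT_READ);		/* "constify" the table */

	/* Patch window, analogous to pax_open_kernel()/pax_close_kernel(). */
	mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
	ops->open = cuse_open_sketch;
	mprotect(ops, pagesz, PROT_READ);

	printf("open() -> %d\n", ops->open());
	return 0;
}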
42907diff -urNp linux-2.6.32.46/fs/fuse/dev.c linux-2.6.32.46/fs/fuse/dev.c
42908--- linux-2.6.32.46/fs/fuse/dev.c 2011-08-29 22:24:44.000000000 -0400
42909+++ linux-2.6.32.46/fs/fuse/dev.c 2011-08-29 22:25:07.000000000 -0400
42910@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
42911 {
42912 struct fuse_notify_inval_entry_out outarg;
42913 int err = -EINVAL;
42914- char buf[FUSE_NAME_MAX+1];
42915+ char *buf = NULL;
42916 struct qstr name;
42917
42918 if (size < sizeof(outarg))
42919@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
42920 if (outarg.namelen > FUSE_NAME_MAX)
42921 goto err;
42922
42923+ err = -ENOMEM;
42924+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
42925+ if (!buf)
42926+ goto err;
42927+
42928 err = -EINVAL;
42929 if (size != sizeof(outarg) + outarg.namelen + 1)
42930 goto err;
42931@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struc
42932
42933 down_read(&fc->killsb);
42934 err = -ENOENT;
42935- if (!fc->sb)
42936- goto err_unlock;
42937-
42938- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42939-
42940-err_unlock:
42941+ if (fc->sb)
42942+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
42943 up_read(&fc->killsb);
42944+ kfree(buf);
42945 return err;
42946
42947 err:
42948 fuse_copy_finish(cs);
42949+ kfree(buf);
42950 return err;
42951 }
42952
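Note: fuse_notify_inval_entry() above replaces a FUSE_NAME_MAX+1 on-stack name buffer with a kmalloc()'d one, frees it on every exit path, and folds the old err_unlock label into a simple `if (fc->sb)` so the buffer is always released. A small standalone sketch of the same move-the-large-buffer-off-the-stack pattern; the function and constant names below are illustrative:

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define NAME_MAX_SKETCH 1024	/* stands in for FUSE_NAME_MAX */

/* Copies a name out of a message; the buffer lives on the heap so a deep
 * call chain does not risk overflowing a small (kernel) stack. */
static int handle_name_sketch(const char *src, size_t len)
{
	char *buf;
	int err;

	if (len > NAME_MAX_SKETCH)
		return -EINVAL;

	buf = malloc(NAME_MAX_SKETCH + 1);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);
	buf[len] = '\0';

	err = 0;		/* ... use buf ... */

	free(buf);		/* released on every path, as in the patch */
	return err;
}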
42953diff -urNp linux-2.6.32.46/fs/fuse/dir.c linux-2.6.32.46/fs/fuse/dir.c
42954--- linux-2.6.32.46/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
42955+++ linux-2.6.32.46/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
42956@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
42957 return link;
42958 }
42959
42960-static void free_link(char *link)
42961+static void free_link(const char *link)
42962 {
42963 if (!IS_ERR(link))
42964 free_page((unsigned long) link);
42965diff -urNp linux-2.6.32.46/fs/gfs2/ops_inode.c linux-2.6.32.46/fs/gfs2/ops_inode.c
42966--- linux-2.6.32.46/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
42967+++ linux-2.6.32.46/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
42968@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
42969 unsigned int x;
42970 int error;
42971
42972+ pax_track_stack();
42973+
42974 if (ndentry->d_inode) {
42975 nip = GFS2_I(ndentry->d_inode);
42976 if (ip == nip)
42977diff -urNp linux-2.6.32.46/fs/gfs2/sys.c linux-2.6.32.46/fs/gfs2/sys.c
42978--- linux-2.6.32.46/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
42979+++ linux-2.6.32.46/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
42980@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
42981 return a->store ? a->store(sdp, buf, len) : len;
42982 }
42983
42984-static struct sysfs_ops gfs2_attr_ops = {
42985+static const struct sysfs_ops gfs2_attr_ops = {
42986 .show = gfs2_attr_show,
42987 .store = gfs2_attr_store,
42988 };
42989@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
42990 return 0;
42991 }
42992
42993-static struct kset_uevent_ops gfs2_uevent_ops = {
42994+static const struct kset_uevent_ops gfs2_uevent_ops = {
42995 .uevent = gfs2_uevent,
42996 };
42997
42998diff -urNp linux-2.6.32.46/fs/hfsplus/catalog.c linux-2.6.32.46/fs/hfsplus/catalog.c
42999--- linux-2.6.32.46/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43000+++ linux-2.6.32.46/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43001@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43002 int err;
43003 u16 type;
43004
43005+ pax_track_stack();
43006+
43007 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43008 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43009 if (err)
43010@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43011 int entry_size;
43012 int err;
43013
43014+ pax_track_stack();
43015+
43016 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43017 sb = dir->i_sb;
43018 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43019@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43020 int entry_size, type;
43021 int err = 0;
43022
43023+ pax_track_stack();
43024+
43025 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43026 dst_dir->i_ino, dst_name->name);
43027 sb = src_dir->i_sb;
43028diff -urNp linux-2.6.32.46/fs/hfsplus/dir.c linux-2.6.32.46/fs/hfsplus/dir.c
43029--- linux-2.6.32.46/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43030+++ linux-2.6.32.46/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43031@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43032 struct hfsplus_readdir_data *rd;
43033 u16 type;
43034
43035+ pax_track_stack();
43036+
43037 if (filp->f_pos >= inode->i_size)
43038 return 0;
43039
43040diff -urNp linux-2.6.32.46/fs/hfsplus/inode.c linux-2.6.32.46/fs/hfsplus/inode.c
43041--- linux-2.6.32.46/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43042+++ linux-2.6.32.46/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43043@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43044 int res = 0;
43045 u16 type;
43046
43047+ pax_track_stack();
43048+
43049 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43050
43051 HFSPLUS_I(inode).dev = 0;
43052@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43053 struct hfs_find_data fd;
43054 hfsplus_cat_entry entry;
43055
43056+ pax_track_stack();
43057+
43058 if (HFSPLUS_IS_RSRC(inode))
43059 main_inode = HFSPLUS_I(inode).rsrc_inode;
43060
43061diff -urNp linux-2.6.32.46/fs/hfsplus/ioctl.c linux-2.6.32.46/fs/hfsplus/ioctl.c
43062--- linux-2.6.32.46/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43063+++ linux-2.6.32.46/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43064@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43065 struct hfsplus_cat_file *file;
43066 int res;
43067
43068+ pax_track_stack();
43069+
43070 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43071 return -EOPNOTSUPP;
43072
43073@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43074 struct hfsplus_cat_file *file;
43075 ssize_t res = 0;
43076
43077+ pax_track_stack();
43078+
43079 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43080 return -EOPNOTSUPP;
43081
43082diff -urNp linux-2.6.32.46/fs/hfsplus/super.c linux-2.6.32.46/fs/hfsplus/super.c
43083--- linux-2.6.32.46/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43084+++ linux-2.6.32.46/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43085@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43086 struct nls_table *nls = NULL;
43087 int err = -EINVAL;
43088
43089+ pax_track_stack();
43090+
43091 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43092 if (!sbi)
43093 return -ENOMEM;
43094diff -urNp linux-2.6.32.46/fs/hugetlbfs/inode.c linux-2.6.32.46/fs/hugetlbfs/inode.c
43095--- linux-2.6.32.46/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43096+++ linux-2.6.32.46/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43097@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43098 .kill_sb = kill_litter_super,
43099 };
43100
43101-static struct vfsmount *hugetlbfs_vfsmount;
43102+struct vfsmount *hugetlbfs_vfsmount;
43103
43104 static int can_do_hugetlb_shm(void)
43105 {
43106diff -urNp linux-2.6.32.46/fs/ioctl.c linux-2.6.32.46/fs/ioctl.c
43107--- linux-2.6.32.46/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43108+++ linux-2.6.32.46/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43109@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43110 u64 phys, u64 len, u32 flags)
43111 {
43112 struct fiemap_extent extent;
43113- struct fiemap_extent *dest = fieinfo->fi_extents_start;
43114+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43115
43116 /* only count the extents */
43117 if (fieinfo->fi_extents_max == 0) {
43118@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43119
43120 fieinfo.fi_flags = fiemap.fm_flags;
43121 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43122- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43123+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43124
43125 if (fiemap.fm_extent_count != 0 &&
43126 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43127@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43128 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43129 fiemap.fm_flags = fieinfo.fi_flags;
43130 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43131- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43132+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43133 error = -EFAULT;
43134
43135 return error;
43136diff -urNp linux-2.6.32.46/fs/jbd/checkpoint.c linux-2.6.32.46/fs/jbd/checkpoint.c
43137--- linux-2.6.32.46/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43138+++ linux-2.6.32.46/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43139@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43140 tid_t this_tid;
43141 int result;
43142
43143+ pax_track_stack();
43144+
43145 jbd_debug(1, "Start checkpoint\n");
43146
43147 /*
43148diff -urNp linux-2.6.32.46/fs/jffs2/compr_rtime.c linux-2.6.32.46/fs/jffs2/compr_rtime.c
43149--- linux-2.6.32.46/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43150+++ linux-2.6.32.46/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43151@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43152 int outpos = 0;
43153 int pos=0;
43154
43155+ pax_track_stack();
43156+
43157 memset(positions,0,sizeof(positions));
43158
43159 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43160@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43161 int outpos = 0;
43162 int pos=0;
43163
43164+ pax_track_stack();
43165+
43166 memset(positions,0,sizeof(positions));
43167
43168 while (outpos<destlen) {
43169diff -urNp linux-2.6.32.46/fs/jffs2/compr_rubin.c linux-2.6.32.46/fs/jffs2/compr_rubin.c
43170--- linux-2.6.32.46/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43171+++ linux-2.6.32.46/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43172@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43173 int ret;
43174 uint32_t mysrclen, mydstlen;
43175
43176+ pax_track_stack();
43177+
43178 mysrclen = *sourcelen;
43179 mydstlen = *dstlen - 8;
43180
43181diff -urNp linux-2.6.32.46/fs/jffs2/erase.c linux-2.6.32.46/fs/jffs2/erase.c
43182--- linux-2.6.32.46/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43183+++ linux-2.6.32.46/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43184@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43185 struct jffs2_unknown_node marker = {
43186 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43187 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43188- .totlen = cpu_to_je32(c->cleanmarker_size)
43189+ .totlen = cpu_to_je32(c->cleanmarker_size),
43190+ .hdr_crc = cpu_to_je32(0)
43191 };
43192
43193 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43194diff -urNp linux-2.6.32.46/fs/jffs2/wbuf.c linux-2.6.32.46/fs/jffs2/wbuf.c
43195--- linux-2.6.32.46/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43196+++ linux-2.6.32.46/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43197@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43198 {
43199 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43200 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43201- .totlen = constant_cpu_to_je32(8)
43202+ .totlen = constant_cpu_to_je32(8),
43203+ .hdr_crc = constant_cpu_to_je32(0)
43204 };
43205
43206 /*
43207diff -urNp linux-2.6.32.46/fs/jffs2/xattr.c linux-2.6.32.46/fs/jffs2/xattr.c
43208--- linux-2.6.32.46/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43209+++ linux-2.6.32.46/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43210@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43211
43212 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43213
43214+ pax_track_stack();
43215+
43216 /* Phase.1 : Merge same xref */
43217 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43218 xref_tmphash[i] = NULL;
43219diff -urNp linux-2.6.32.46/fs/jfs/super.c linux-2.6.32.46/fs/jfs/super.c
43220--- linux-2.6.32.46/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43221+++ linux-2.6.32.46/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43222@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43223
43224 jfs_inode_cachep =
43225 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43226- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43227+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43228 init_once);
43229 if (jfs_inode_cachep == NULL)
43230 return -ENOMEM;
43231diff -urNp linux-2.6.32.46/fs/Kconfig.binfmt linux-2.6.32.46/fs/Kconfig.binfmt
43232--- linux-2.6.32.46/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43233+++ linux-2.6.32.46/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43234@@ -86,7 +86,7 @@ config HAVE_AOUT
43235
43236 config BINFMT_AOUT
43237 tristate "Kernel support for a.out and ECOFF binaries"
43238- depends on HAVE_AOUT
43239+ depends on HAVE_AOUT && BROKEN
43240 ---help---
43241 A.out (Assembler.OUTput) is a set of formats for libraries and
43242 executables used in the earliest versions of UNIX. Linux used
43243diff -urNp linux-2.6.32.46/fs/libfs.c linux-2.6.32.46/fs/libfs.c
43244--- linux-2.6.32.46/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43245+++ linux-2.6.32.46/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43246@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43247
43248 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43249 struct dentry *next;
43250+ char d_name[sizeof(next->d_iname)];
43251+ const unsigned char *name;
43252+
43253 next = list_entry(p, struct dentry, d_u.d_child);
43254 if (d_unhashed(next) || !next->d_inode)
43255 continue;
43256
43257 spin_unlock(&dcache_lock);
43258- if (filldir(dirent, next->d_name.name,
43259+ name = next->d_name.name;
43260+ if (name == next->d_iname) {
43261+ memcpy(d_name, name, next->d_name.len);
43262+ name = d_name;
43263+ }
43264+ if (filldir(dirent, name,
43265 next->d_name.len, filp->f_pos,
43266 next->d_inode->i_ino,
43267 dt_type(next->d_inode)) < 0)
43268diff -urNp linux-2.6.32.46/fs/lockd/clntproc.c linux-2.6.32.46/fs/lockd/clntproc.c
43269--- linux-2.6.32.46/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43270+++ linux-2.6.32.46/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43271@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43272 /*
43273 * Cookie counter for NLM requests
43274 */
43275-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43276+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43277
43278 void nlmclnt_next_cookie(struct nlm_cookie *c)
43279 {
43280- u32 cookie = atomic_inc_return(&nlm_cookie);
43281+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43282
43283 memcpy(c->data, &cookie, 4);
43284 c->len=4;
43285@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43286 struct nlm_rqst reqst, *req;
43287 int status;
43288
43289+ pax_track_stack();
43290+
43291 req = &reqst;
43292 memset(req, 0, sizeof(*req));
43293 locks_init_lock(&req->a_args.lock.fl);
43294diff -urNp linux-2.6.32.46/fs/lockd/svc.c linux-2.6.32.46/fs/lockd/svc.c
43295--- linux-2.6.32.46/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43296+++ linux-2.6.32.46/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43297@@ -43,7 +43,7 @@
43298
43299 static struct svc_program nlmsvc_program;
43300
43301-struct nlmsvc_binding * nlmsvc_ops;
43302+const struct nlmsvc_binding * nlmsvc_ops;
43303 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43304
43305 static DEFINE_MUTEX(nlmsvc_mutex);
43306diff -urNp linux-2.6.32.46/fs/locks.c linux-2.6.32.46/fs/locks.c
43307--- linux-2.6.32.46/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43308+++ linux-2.6.32.46/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43309@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43310
43311 static struct kmem_cache *filelock_cache __read_mostly;
43312
43313+static void locks_init_lock_always(struct file_lock *fl)
43314+{
43315+ fl->fl_next = NULL;
43316+ fl->fl_fasync = NULL;
43317+ fl->fl_owner = NULL;
43318+ fl->fl_pid = 0;
43319+ fl->fl_nspid = NULL;
43320+ fl->fl_file = NULL;
43321+ fl->fl_flags = 0;
43322+ fl->fl_type = 0;
43323+ fl->fl_start = fl->fl_end = 0;
43324+}
43325+
43326 /* Allocate an empty lock structure. */
43327 static struct file_lock *locks_alloc_lock(void)
43328 {
43329- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43330+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43331+
43332+ if (fl)
43333+ locks_init_lock_always(fl);
43334+
43335+ return fl;
43336 }
43337
43338 void locks_release_private(struct file_lock *fl)
43339@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43340 INIT_LIST_HEAD(&fl->fl_link);
43341 INIT_LIST_HEAD(&fl->fl_block);
43342 init_waitqueue_head(&fl->fl_wait);
43343- fl->fl_next = NULL;
43344- fl->fl_fasync = NULL;
43345- fl->fl_owner = NULL;
43346- fl->fl_pid = 0;
43347- fl->fl_nspid = NULL;
43348- fl->fl_file = NULL;
43349- fl->fl_flags = 0;
43350- fl->fl_type = 0;
43351- fl->fl_start = fl->fl_end = 0;
43352 fl->fl_ops = NULL;
43353 fl->fl_lmops = NULL;
43354+ locks_init_lock_always(fl);
43355 }
43356
43357 EXPORT_SYMBOL(locks_init_lock);
43358@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43359 return;
43360
43361 if (filp->f_op && filp->f_op->flock) {
43362- struct file_lock fl = {
43363+ struct file_lock flock = {
43364 .fl_pid = current->tgid,
43365 .fl_file = filp,
43366 .fl_flags = FL_FLOCK,
43367 .fl_type = F_UNLCK,
43368 .fl_end = OFFSET_MAX,
43369 };
43370- filp->f_op->flock(filp, F_SETLKW, &fl);
43371- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43372- fl.fl_ops->fl_release_private(&fl);
43373+ filp->f_op->flock(filp, F_SETLKW, &flock);
43374+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43375+ flock.fl_ops->fl_release_private(&flock);
43376 }
43377
43378 lock_kernel();
43379diff -urNp linux-2.6.32.46/fs/mbcache.c linux-2.6.32.46/fs/mbcache.c
43380--- linux-2.6.32.46/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43381+++ linux-2.6.32.46/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43382@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43383 if (!cache)
43384 goto fail;
43385 cache->c_name = name;
43386- cache->c_op.free = NULL;
43387+ *(void **)&cache->c_op.free = NULL;
43388 if (cache_op)
43389- cache->c_op.free = cache_op->free;
43390+ *(void **)&cache->c_op.free = cache_op->free;
43391 atomic_set(&cache->c_entry_count, 0);
43392 cache->c_bucket_bits = bucket_bits;
43393 #ifdef MB_CACHE_INDEXES_COUNT
43394diff -urNp linux-2.6.32.46/fs/namei.c linux-2.6.32.46/fs/namei.c
43395--- linux-2.6.32.46/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43396+++ linux-2.6.32.46/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43397@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43398 return ret;
43399
43400 /*
43401- * Read/write DACs are always overridable.
43402- * Executable DACs are overridable if at least one exec bit is set.
43403- */
43404- if (!(mask & MAY_EXEC) || execute_ok(inode))
43405- if (capable(CAP_DAC_OVERRIDE))
43406- return 0;
43407-
43408- /*
43409 * Searching includes executable on directories, else just read.
43410 */
43411 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43412@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43413 if (capable(CAP_DAC_READ_SEARCH))
43414 return 0;
43415
43416+ /*
43417+ * Read/write DACs are always overridable.
43418+ * Executable DACs are overridable if at least one exec bit is set.
43419+ */
43420+ if (!(mask & MAY_EXEC) || execute_ok(inode))
43421+ if (capable(CAP_DAC_OVERRIDE))
43422+ return 0;
43423+
43424 return -EACCES;
43425 }
43426
43427@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43428 if (!ret)
43429 goto ok;
43430
43431- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43432+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43433+ capable(CAP_DAC_OVERRIDE))
43434 goto ok;
43435
43436 return ret;
43437@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43438 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43439 error = PTR_ERR(cookie);
43440 if (!IS_ERR(cookie)) {
43441- char *s = nd_get_link(nd);
43442+ const char *s = nd_get_link(nd);
43443 error = 0;
43444 if (s)
43445 error = __vfs_follow_link(nd, s);
43446@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43447 err = security_inode_follow_link(path->dentry, nd);
43448 if (err)
43449 goto loop;
43450+
43451+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43452+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43453+ err = -EACCES;
43454+ goto loop;
43455+ }
43456+
43457 current->link_count++;
43458 current->total_link_count++;
43459 nd->depth++;
43460@@ -1016,11 +1024,18 @@ return_reval:
43461 break;
43462 }
43463 return_base:
43464+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43465+ path_put(&nd->path);
43466+ return -ENOENT;
43467+ }
43468 return 0;
43469 out_dput:
43470 path_put_conditional(&next, nd);
43471 break;
43472 }
43473+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43474+ err = -ENOENT;
43475+
43476 path_put(&nd->path);
43477 return_err:
43478 return err;
43479@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43480 int retval = path_init(dfd, name, flags, nd);
43481 if (!retval)
43482 retval = path_walk(name, nd);
43483- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43484- nd->path.dentry->d_inode))
43485- audit_inode(name, nd->path.dentry);
43486+
43487+ if (likely(!retval)) {
43488+ if (nd->path.dentry && nd->path.dentry->d_inode) {
43489+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43490+ retval = -ENOENT;
43491+ if (!audit_dummy_context())
43492+ audit_inode(name, nd->path.dentry);
43493+ }
43494+ }
43495 if (nd->root.mnt) {
43496 path_put(&nd->root);
43497 nd->root.mnt = NULL;
43498 }
43499+
43500 return retval;
43501 }
43502
43503@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43504 if (error)
43505 goto err_out;
43506
43507+
43508+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43509+ error = -EPERM;
43510+ goto err_out;
43511+ }
43512+ if (gr_handle_rawio(inode)) {
43513+ error = -EPERM;
43514+ goto err_out;
43515+ }
43516+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43517+ error = -EACCES;
43518+ goto err_out;
43519+ }
43520+
43521 if (flag & O_TRUNC) {
43522 error = get_write_access(inode);
43523 if (error)
43524@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43525 int error;
43526 struct dentry *dir = nd->path.dentry;
43527
43528+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43529+ error = -EACCES;
43530+ goto out_unlock;
43531+ }
43532+
43533 if (!IS_POSIXACL(dir->d_inode))
43534 mode &= ~current_umask();
43535 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43536 if (error)
43537 goto out_unlock;
43538 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43539+ if (!error)
43540+ gr_handle_create(path->dentry, nd->path.mnt);
43541 out_unlock:
43542 mutex_unlock(&dir->d_inode->i_mutex);
43543 dput(nd->path.dentry);
43544@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43545 &nd, flag);
43546 if (error)
43547 return ERR_PTR(error);
43548+
43549+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43550+ error = -EPERM;
43551+ goto exit;
43552+ }
43553+
43554+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43555+ error = -EPERM;
43556+ goto exit;
43557+ }
43558+
43559+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43560+ error = -EACCES;
43561+ goto exit;
43562+ }
43563+
43564 goto ok;
43565 }
43566
43567@@ -1795,6 +1854,14 @@ do_last:
43568 /*
43569 * It already exists.
43570 */
43571+
43572+ /* only check if O_CREAT is specified, all other checks need
43573+ to go into may_open */
43574+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
43575+ error = -EACCES;
43576+ goto exit_mutex_unlock;
43577+ }
43578+
43579 mutex_unlock(&dir->d_inode->i_mutex);
43580 audit_inode(pathname, path.dentry);
43581
43582@@ -1887,6 +1954,13 @@ do_link:
43583 error = security_inode_follow_link(path.dentry, &nd);
43584 if (error)
43585 goto exit_dput;
43586+
43587+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
43588+ path.dentry, nd.path.mnt)) {
43589+ error = -EACCES;
43590+ goto exit_dput;
43591+ }
43592+
43593 error = __do_follow_link(&path, &nd);
43594 if (error) {
43595 /* Does someone understand code flow here? Or it is only
43596@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43597 error = may_mknod(mode);
43598 if (error)
43599 goto out_dput;
43600+
43601+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
43602+ error = -EPERM;
43603+ goto out_dput;
43604+ }
43605+
43606+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
43607+ error = -EACCES;
43608+ goto out_dput;
43609+ }
43610+
43611 error = mnt_want_write(nd.path.mnt);
43612 if (error)
43613 goto out_dput;
43614@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
43615 }
43616 out_drop_write:
43617 mnt_drop_write(nd.path.mnt);
43618+
43619+ if (!error)
43620+ gr_handle_create(dentry, nd.path.mnt);
43621 out_dput:
43622 dput(dentry);
43623 out_unlock:
43624@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43625 if (IS_ERR(dentry))
43626 goto out_unlock;
43627
43628+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
43629+ error = -EACCES;
43630+ goto out_dput;
43631+ }
43632+
43633 if (!IS_POSIXACL(nd.path.dentry->d_inode))
43634 mode &= ~current_umask();
43635 error = mnt_want_write(nd.path.mnt);
43636@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
43637 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
43638 out_drop_write:
43639 mnt_drop_write(nd.path.mnt);
43640+
43641+ if (!error)
43642+ gr_handle_create(dentry, nd.path.mnt);
43643+
43644 out_dput:
43645 dput(dentry);
43646 out_unlock:
43647@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
43648 char * name;
43649 struct dentry *dentry;
43650 struct nameidata nd;
43651+ ino_t saved_ino = 0;
43652+ dev_t saved_dev = 0;
43653
43654 error = user_path_parent(dfd, pathname, &nd, &name);
43655 if (error)
43656@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
43657 error = PTR_ERR(dentry);
43658 if (IS_ERR(dentry))
43659 goto exit2;
43660+
43661+ if (dentry->d_inode != NULL) {
43662+ if (dentry->d_inode->i_nlink <= 1) {
43663+ saved_ino = dentry->d_inode->i_ino;
43664+ saved_dev = gr_get_dev_from_dentry(dentry);
43665+ }
43666+
43667+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
43668+ error = -EACCES;
43669+ goto exit3;
43670+ }
43671+ }
43672+
43673 error = mnt_want_write(nd.path.mnt);
43674 if (error)
43675 goto exit3;
43676@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
43677 if (error)
43678 goto exit4;
43679 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
43680+ if (!error && (saved_dev || saved_ino))
43681+ gr_handle_delete(saved_ino, saved_dev);
43682 exit4:
43683 mnt_drop_write(nd.path.mnt);
43684 exit3:
43685@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
43686 struct dentry *dentry;
43687 struct nameidata nd;
43688 struct inode *inode = NULL;
43689+ ino_t saved_ino = 0;
43690+ dev_t saved_dev = 0;
43691
43692 error = user_path_parent(dfd, pathname, &nd, &name);
43693 if (error)
43694@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
43695 if (nd.last.name[nd.last.len])
43696 goto slashes;
43697 inode = dentry->d_inode;
43698- if (inode)
43699+ if (inode) {
43700+ if (inode->i_nlink <= 1) {
43701+ saved_ino = inode->i_ino;
43702+ saved_dev = gr_get_dev_from_dentry(dentry);
43703+ }
43704+
43705 atomic_inc(&inode->i_count);
43706+
43707+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
43708+ error = -EACCES;
43709+ goto exit2;
43710+ }
43711+ }
43712 error = mnt_want_write(nd.path.mnt);
43713 if (error)
43714 goto exit2;
43715@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
43716 if (error)
43717 goto exit3;
43718 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
43719+ if (!error && (saved_ino || saved_dev))
43720+ gr_handle_delete(saved_ino, saved_dev);
43721 exit3:
43722 mnt_drop_write(nd.path.mnt);
43723 exit2:
43724@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
43725 if (IS_ERR(dentry))
43726 goto out_unlock;
43727
43728+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
43729+ error = -EACCES;
43730+ goto out_dput;
43731+ }
43732+
43733 error = mnt_want_write(nd.path.mnt);
43734 if (error)
43735 goto out_dput;
43736@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
43737 if (error)
43738 goto out_drop_write;
43739 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
43740+ if (!error)
43741+ gr_handle_create(dentry, nd.path.mnt);
43742 out_drop_write:
43743 mnt_drop_write(nd.path.mnt);
43744 out_dput:
43745@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43746 error = PTR_ERR(new_dentry);
43747 if (IS_ERR(new_dentry))
43748 goto out_unlock;
43749+
43750+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
43751+ old_path.dentry->d_inode,
43752+ old_path.dentry->d_inode->i_mode, to)) {
43753+ error = -EACCES;
43754+ goto out_dput;
43755+ }
43756+
43757+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
43758+ old_path.dentry, old_path.mnt, to)) {
43759+ error = -EACCES;
43760+ goto out_dput;
43761+ }
43762+
43763 error = mnt_want_write(nd.path.mnt);
43764 if (error)
43765 goto out_dput;
43766@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
43767 if (error)
43768 goto out_drop_write;
43769 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
43770+ if (!error)
43771+ gr_handle_create(new_dentry, nd.path.mnt);
43772 out_drop_write:
43773 mnt_drop_write(nd.path.mnt);
43774 out_dput:
43775@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43776 char *to;
43777 int error;
43778
43779+ pax_track_stack();
43780+
43781 error = user_path_parent(olddfd, oldname, &oldnd, &from);
43782 if (error)
43783 goto exit;
43784@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43785 if (new_dentry == trap)
43786 goto exit5;
43787
43788+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
43789+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
43790+ to);
43791+ if (error)
43792+ goto exit5;
43793+
43794 error = mnt_want_write(oldnd.path.mnt);
43795 if (error)
43796 goto exit5;
43797@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
43798 goto exit6;
43799 error = vfs_rename(old_dir->d_inode, old_dentry,
43800 new_dir->d_inode, new_dentry);
43801+ if (!error)
43802+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
43803+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
43804 exit6:
43805 mnt_drop_write(oldnd.path.mnt);
43806 exit5:
43807@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
43808
43809 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
43810 {
43811+ char tmpbuf[64];
43812+ const char *newlink;
43813 int len;
43814
43815 len = PTR_ERR(link);
43816@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
43817 len = strlen(link);
43818 if (len > (unsigned) buflen)
43819 len = buflen;
43820- if (copy_to_user(buffer, link, len))
43821+
43822+ if (len < sizeof(tmpbuf)) {
43823+ memcpy(tmpbuf, link, len);
43824+ newlink = tmpbuf;
43825+ } else
43826+ newlink = link;
43827+
43828+ if (copy_to_user(buffer, newlink, len))
43829 len = -EFAULT;
43830 out:
43831 return len;
43832diff -urNp linux-2.6.32.46/fs/namespace.c linux-2.6.32.46/fs/namespace.c
43833--- linux-2.6.32.46/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
43834+++ linux-2.6.32.46/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
43835@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
43836 if (!(sb->s_flags & MS_RDONLY))
43837 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
43838 up_write(&sb->s_umount);
43839+
43840+ gr_log_remount(mnt->mnt_devname, retval);
43841+
43842 return retval;
43843 }
43844
43845@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
43846 security_sb_umount_busy(mnt);
43847 up_write(&namespace_sem);
43848 release_mounts(&umount_list);
43849+
43850+ gr_log_unmount(mnt->mnt_devname, retval);
43851+
43852 return retval;
43853 }
43854
43855@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
43856 if (retval)
43857 goto dput_out;
43858
43859+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
43860+ retval = -EPERM;
43861+ goto dput_out;
43862+ }
43863+
43864+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
43865+ retval = -EPERM;
43866+ goto dput_out;
43867+ }
43868+
43869 if (flags & MS_REMOUNT)
43870 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
43871 data_page);
43872@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
43873 dev_name, data_page);
43874 dput_out:
43875 path_put(&path);
43876+
43877+ gr_log_mount(dev_name, dir_name, retval);
43878+
43879 return retval;
43880 }
43881
43882@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
43883 goto out1;
43884 }
43885
43886+ if (gr_handle_chroot_pivot()) {
43887+ error = -EPERM;
43888+ path_put(&old);
43889+ goto out1;
43890+ }
43891+
43892 read_lock(&current->fs->lock);
43893 root = current->fs->root;
43894 path_get(&current->fs->root);
43895diff -urNp linux-2.6.32.46/fs/ncpfs/dir.c linux-2.6.32.46/fs/ncpfs/dir.c
43896--- linux-2.6.32.46/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43897+++ linux-2.6.32.46/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43898@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
43899 int res, val = 0, len;
43900 __u8 __name[NCP_MAXPATHLEN + 1];
43901
43902+ pax_track_stack();
43903+
43904 parent = dget_parent(dentry);
43905 dir = parent->d_inode;
43906
43907@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
43908 int error, res, len;
43909 __u8 __name[NCP_MAXPATHLEN + 1];
43910
43911+ pax_track_stack();
43912+
43913 lock_kernel();
43914 error = -EIO;
43915 if (!ncp_conn_valid(server))
43916@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
43917 int error, result, len;
43918 int opmode;
43919 __u8 __name[NCP_MAXPATHLEN + 1];
43920-
43921+
43922 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
43923 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
43924
43925+ pax_track_stack();
43926+
43927 error = -EIO;
43928 lock_kernel();
43929 if (!ncp_conn_valid(server))
43930@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
43931 int error, len;
43932 __u8 __name[NCP_MAXPATHLEN + 1];
43933
43934+ pax_track_stack();
43935+
43936 DPRINTK("ncp_mkdir: making %s/%s\n",
43937 dentry->d_parent->d_name.name, dentry->d_name.name);
43938
43939@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
43940 if (!ncp_conn_valid(server))
43941 goto out;
43942
43943+ pax_track_stack();
43944+
43945 ncp_age_dentry(server, dentry);
43946 len = sizeof(__name);
43947 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
43948@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
43949 int old_len, new_len;
43950 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
43951
43952+ pax_track_stack();
43953+
43954 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
43955 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
43956 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
43957diff -urNp linux-2.6.32.46/fs/ncpfs/inode.c linux-2.6.32.46/fs/ncpfs/inode.c
43958--- linux-2.6.32.46/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43959+++ linux-2.6.32.46/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
43960@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
43961 #endif
43962 struct ncp_entry_info finfo;
43963
43964+ pax_track_stack();
43965+
43966 data.wdog_pid = NULL;
43967 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
43968 if (!server)
43969diff -urNp linux-2.6.32.46/fs/nfs/inode.c linux-2.6.32.46/fs/nfs/inode.c
43970--- linux-2.6.32.46/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
43971+++ linux-2.6.32.46/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
43972@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
43973 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
43974 nfsi->attrtimeo_timestamp = jiffies;
43975
43976- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
43977+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
43978 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
43979 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
43980 else
43981@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
43982 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
43983 }
43984
43985-static atomic_long_t nfs_attr_generation_counter;
43986+static atomic_long_unchecked_t nfs_attr_generation_counter;
43987
43988 static unsigned long nfs_read_attr_generation_counter(void)
43989 {
43990- return atomic_long_read(&nfs_attr_generation_counter);
43991+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
43992 }
43993
43994 unsigned long nfs_inc_attr_generation_counter(void)
43995 {
43996- return atomic_long_inc_return(&nfs_attr_generation_counter);
43997+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
43998 }
43999
44000 void nfs_fattr_init(struct nfs_fattr *fattr)
44001diff -urNp linux-2.6.32.46/fs/nfsd/lockd.c linux-2.6.32.46/fs/nfsd/lockd.c
44002--- linux-2.6.32.46/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44003+++ linux-2.6.32.46/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44004@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44005 fput(filp);
44006 }
44007
44008-static struct nlmsvc_binding nfsd_nlm_ops = {
44009+static const struct nlmsvc_binding nfsd_nlm_ops = {
44010 .fopen = nlm_fopen, /* open file for locking */
44011 .fclose = nlm_fclose, /* close file */
44012 };
44013diff -urNp linux-2.6.32.46/fs/nfsd/nfs4state.c linux-2.6.32.46/fs/nfsd/nfs4state.c
44014--- linux-2.6.32.46/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44015+++ linux-2.6.32.46/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44016@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44017 unsigned int cmd;
44018 int err;
44019
44020+ pax_track_stack();
44021+
44022 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44023 (long long) lock->lk_offset,
44024 (long long) lock->lk_length);
44025diff -urNp linux-2.6.32.46/fs/nfsd/nfs4xdr.c linux-2.6.32.46/fs/nfsd/nfs4xdr.c
44026--- linux-2.6.32.46/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44027+++ linux-2.6.32.46/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44028@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44029 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44030 u32 minorversion = resp->cstate.minorversion;
44031
44032+ pax_track_stack();
44033+
44034 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44035 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44036 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44037diff -urNp linux-2.6.32.46/fs/nfsd/vfs.c linux-2.6.32.46/fs/nfsd/vfs.c
44038--- linux-2.6.32.46/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44039+++ linux-2.6.32.46/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44040@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44041 } else {
44042 oldfs = get_fs();
44043 set_fs(KERNEL_DS);
44044- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44045+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44046 set_fs(oldfs);
44047 }
44048
44049@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44050
44051 /* Write the data. */
44052 oldfs = get_fs(); set_fs(KERNEL_DS);
44053- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44054+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44055 set_fs(oldfs);
44056 if (host_err < 0)
44057 goto out_nfserr;
44058@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44059 */
44060
44061 oldfs = get_fs(); set_fs(KERNEL_DS);
44062- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44063+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44064 set_fs(oldfs);
44065
44066 if (host_err < 0)
44067diff -urNp linux-2.6.32.46/fs/nilfs2/ioctl.c linux-2.6.32.46/fs/nilfs2/ioctl.c
44068--- linux-2.6.32.46/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44069+++ linux-2.6.32.46/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44070@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44071 unsigned int cmd, void __user *argp)
44072 {
44073 struct nilfs_argv argv[5];
44074- const static size_t argsz[5] = {
44075+ static const size_t argsz[5] = {
44076 sizeof(struct nilfs_vdesc),
44077 sizeof(struct nilfs_period),
44078 sizeof(__u64),
44079diff -urNp linux-2.6.32.46/fs/notify/dnotify/dnotify.c linux-2.6.32.46/fs/notify/dnotify/dnotify.c
44080--- linux-2.6.32.46/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44081+++ linux-2.6.32.46/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44082@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44083 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44084 }
44085
44086-static struct fsnotify_ops dnotify_fsnotify_ops = {
44087+static const struct fsnotify_ops dnotify_fsnotify_ops = {
44088 .handle_event = dnotify_handle_event,
44089 .should_send_event = dnotify_should_send_event,
44090 .free_group_priv = NULL,
44091diff -urNp linux-2.6.32.46/fs/notify/notification.c linux-2.6.32.46/fs/notify/notification.c
44092--- linux-2.6.32.46/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44093+++ linux-2.6.32.46/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44094@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44095 * get set to 0 so it will never get 'freed'
44096 */
44097 static struct fsnotify_event q_overflow_event;
44098-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44099+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44100
44101 /**
44102 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44103@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44104 */
44105 u32 fsnotify_get_cookie(void)
44106 {
44107- return atomic_inc_return(&fsnotify_sync_cookie);
44108+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44109 }
44110 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44111
44112diff -urNp linux-2.6.32.46/fs/ntfs/dir.c linux-2.6.32.46/fs/ntfs/dir.c
44113--- linux-2.6.32.46/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44114+++ linux-2.6.32.46/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44115@@ -1328,7 +1328,7 @@ find_next_index_buffer:
44116 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44117 ~(s64)(ndir->itype.index.block_size - 1)));
44118 /* Bounds checks. */
44119- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44120+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44121 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44122 "inode 0x%lx or driver bug.", vdir->i_ino);
44123 goto err_out;
44124diff -urNp linux-2.6.32.46/fs/ntfs/file.c linux-2.6.32.46/fs/ntfs/file.c
44125--- linux-2.6.32.46/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44126+++ linux-2.6.32.46/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44127@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44128 #endif /* NTFS_RW */
44129 };
44130
44131-const struct file_operations ntfs_empty_file_ops = {};
44132+const struct file_operations ntfs_empty_file_ops __read_only;
44133
44134-const struct inode_operations ntfs_empty_inode_ops = {};
44135+const struct inode_operations ntfs_empty_inode_ops __read_only;
44136diff -urNp linux-2.6.32.46/fs/ocfs2/cluster/masklog.c linux-2.6.32.46/fs/ocfs2/cluster/masklog.c
44137--- linux-2.6.32.46/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44138+++ linux-2.6.32.46/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44139@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44140 return mlog_mask_store(mlog_attr->mask, buf, count);
44141 }
44142
44143-static struct sysfs_ops mlog_attr_ops = {
44144+static const struct sysfs_ops mlog_attr_ops = {
44145 .show = mlog_show,
44146 .store = mlog_store,
44147 };
44148diff -urNp linux-2.6.32.46/fs/ocfs2/localalloc.c linux-2.6.32.46/fs/ocfs2/localalloc.c
44149--- linux-2.6.32.46/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44150+++ linux-2.6.32.46/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44151@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44152 goto bail;
44153 }
44154
44155- atomic_inc(&osb->alloc_stats.moves);
44156+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44157
44158 status = 0;
44159 bail:
44160diff -urNp linux-2.6.32.46/fs/ocfs2/namei.c linux-2.6.32.46/fs/ocfs2/namei.c
44161--- linux-2.6.32.46/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44162+++ linux-2.6.32.46/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44163@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44164 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44165 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44166
44167+ pax_track_stack();
44168+
44169 /* At some point it might be nice to break this function up a
44170 * bit. */
44171
44172diff -urNp linux-2.6.32.46/fs/ocfs2/ocfs2.h linux-2.6.32.46/fs/ocfs2/ocfs2.h
44173--- linux-2.6.32.46/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44174+++ linux-2.6.32.46/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44175@@ -217,11 +217,11 @@ enum ocfs2_vol_state
44176
44177 struct ocfs2_alloc_stats
44178 {
44179- atomic_t moves;
44180- atomic_t local_data;
44181- atomic_t bitmap_data;
44182- atomic_t bg_allocs;
44183- atomic_t bg_extends;
44184+ atomic_unchecked_t moves;
44185+ atomic_unchecked_t local_data;
44186+ atomic_unchecked_t bitmap_data;
44187+ atomic_unchecked_t bg_allocs;
44188+ atomic_unchecked_t bg_extends;
44189 };
44190
44191 enum ocfs2_local_alloc_state
44192diff -urNp linux-2.6.32.46/fs/ocfs2/suballoc.c linux-2.6.32.46/fs/ocfs2/suballoc.c
44193--- linux-2.6.32.46/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44194+++ linux-2.6.32.46/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44195@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44196 mlog_errno(status);
44197 goto bail;
44198 }
44199- atomic_inc(&osb->alloc_stats.bg_extends);
44200+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44201
44202 /* You should never ask for this much metadata */
44203 BUG_ON(bits_wanted >
44204@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44205 mlog_errno(status);
44206 goto bail;
44207 }
44208- atomic_inc(&osb->alloc_stats.bg_allocs);
44209+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44210
44211 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44212 ac->ac_bits_given += (*num_bits);
44213@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44214 mlog_errno(status);
44215 goto bail;
44216 }
44217- atomic_inc(&osb->alloc_stats.bg_allocs);
44218+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44219
44220 BUG_ON(num_bits != 1);
44221
44222@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44223 cluster_start,
44224 num_clusters);
44225 if (!status)
44226- atomic_inc(&osb->alloc_stats.local_data);
44227+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44228 } else {
44229 if (min_clusters > (osb->bitmap_cpg - 1)) {
44230 /* The only paths asking for contiguousness
44231@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44232 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44233 bg_blkno,
44234 bg_bit_off);
44235- atomic_inc(&osb->alloc_stats.bitmap_data);
44236+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44237 }
44238 }
44239 if (status < 0) {
44240diff -urNp linux-2.6.32.46/fs/ocfs2/super.c linux-2.6.32.46/fs/ocfs2/super.c
44241--- linux-2.6.32.46/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44242+++ linux-2.6.32.46/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44243@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44244 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44245 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44246 "Stats",
44247- atomic_read(&osb->alloc_stats.bitmap_data),
44248- atomic_read(&osb->alloc_stats.local_data),
44249- atomic_read(&osb->alloc_stats.bg_allocs),
44250- atomic_read(&osb->alloc_stats.moves),
44251- atomic_read(&osb->alloc_stats.bg_extends));
44252+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44253+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44254+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44255+ atomic_read_unchecked(&osb->alloc_stats.moves),
44256+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44257
44258 out += snprintf(buf + out, len - out,
44259 "%10s => State: %u Descriptor: %llu Size: %u bits "
44260@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44261 spin_lock_init(&osb->osb_xattr_lock);
44262 ocfs2_init_inode_steal_slot(osb);
44263
44264- atomic_set(&osb->alloc_stats.moves, 0);
44265- atomic_set(&osb->alloc_stats.local_data, 0);
44266- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44267- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44268- atomic_set(&osb->alloc_stats.bg_extends, 0);
44269+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44270+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44271+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44272+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44273+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44274
44275 /* Copy the blockcheck stats from the superblock probe */
44276 osb->osb_ecc_stats = *stats;
44277diff -urNp linux-2.6.32.46/fs/open.c linux-2.6.32.46/fs/open.c
44278--- linux-2.6.32.46/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44279+++ linux-2.6.32.46/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44280@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44281 error = locks_verify_truncate(inode, NULL, length);
44282 if (!error)
44283 error = security_path_truncate(&path, length, 0);
44284+
44285+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44286+ error = -EACCES;
44287+
44288 if (!error) {
44289 vfs_dq_init(inode);
44290 error = do_truncate(path.dentry, length, 0, NULL);
44291@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44292 if (__mnt_is_readonly(path.mnt))
44293 res = -EROFS;
44294
44295+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44296+ res = -EACCES;
44297+
44298 out_path_release:
44299 path_put(&path);
44300 out:
44301@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44302 if (error)
44303 goto dput_and_out;
44304
44305+ gr_log_chdir(path.dentry, path.mnt);
44306+
44307 set_fs_pwd(current->fs, &path);
44308
44309 dput_and_out:
44310@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44311 goto out_putf;
44312
44313 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44314+
44315+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44316+ error = -EPERM;
44317+
44318+ if (!error)
44319+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44320+
44321 if (!error)
44322 set_fs_pwd(current->fs, &file->f_path);
44323 out_putf:
44324@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44325 if (!capable(CAP_SYS_CHROOT))
44326 goto dput_and_out;
44327
44328+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44329+ goto dput_and_out;
44330+
44331+ if (gr_handle_chroot_caps(&path)) {
44332+ error = -ENOMEM;
44333+ goto dput_and_out;
44334+ }
44335+
44336 set_fs_root(current->fs, &path);
44337+
44338+ gr_handle_chroot_chdir(&path);
44339+
44340 error = 0;
44341 dput_and_out:
44342 path_put(&path);
44343@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44344 err = mnt_want_write_file(file);
44345 if (err)
44346 goto out_putf;
44347+
44348 mutex_lock(&inode->i_mutex);
44349+
44350+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44351+ err = -EACCES;
44352+ goto out_unlock;
44353+ }
44354+
44355 if (mode == (mode_t) -1)
44356 mode = inode->i_mode;
44357+
44358+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44359+ err = -EPERM;
44360+ goto out_unlock;
44361+ }
44362+
44363 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44364 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44365 err = notify_change(dentry, &newattrs);
44366+
44367+out_unlock:
44368 mutex_unlock(&inode->i_mutex);
44369 mnt_drop_write(file->f_path.mnt);
44370 out_putf:
44371@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44372 error = mnt_want_write(path.mnt);
44373 if (error)
44374 goto dput_and_out;
44375+
44376 mutex_lock(&inode->i_mutex);
44377+
44378+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44379+ error = -EACCES;
44380+ goto out_unlock;
44381+ }
44382+
44383 if (mode == (mode_t) -1)
44384 mode = inode->i_mode;
44385+
44386+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44387+ error = -EACCES;
44388+ goto out_unlock;
44389+ }
44390+
44391 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44392 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44393 error = notify_change(path.dentry, &newattrs);
44394+
44395+out_unlock:
44396 mutex_unlock(&inode->i_mutex);
44397 mnt_drop_write(path.mnt);
44398 dput_and_out:
44399@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44400 return sys_fchmodat(AT_FDCWD, filename, mode);
44401 }
44402
44403-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44404+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44405 {
44406 struct inode *inode = dentry->d_inode;
44407 int error;
44408 struct iattr newattrs;
44409
44410+ if (!gr_acl_handle_chown(dentry, mnt))
44411+ return -EACCES;
44412+
44413 newattrs.ia_valid = ATTR_CTIME;
44414 if (user != (uid_t) -1) {
44415 newattrs.ia_valid |= ATTR_UID;
44416@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44417 error = mnt_want_write(path.mnt);
44418 if (error)
44419 goto out_release;
44420- error = chown_common(path.dentry, user, group);
44421+ error = chown_common(path.dentry, user, group, path.mnt);
44422 mnt_drop_write(path.mnt);
44423 out_release:
44424 path_put(&path);
44425@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44426 error = mnt_want_write(path.mnt);
44427 if (error)
44428 goto out_release;
44429- error = chown_common(path.dentry, user, group);
44430+ error = chown_common(path.dentry, user, group, path.mnt);
44431 mnt_drop_write(path.mnt);
44432 out_release:
44433 path_put(&path);
44434@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44435 error = mnt_want_write(path.mnt);
44436 if (error)
44437 goto out_release;
44438- error = chown_common(path.dentry, user, group);
44439+ error = chown_common(path.dentry, user, group, path.mnt);
44440 mnt_drop_write(path.mnt);
44441 out_release:
44442 path_put(&path);
44443@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44444 goto out_fput;
44445 dentry = file->f_path.dentry;
44446 audit_inode(NULL, dentry);
44447- error = chown_common(dentry, user, group);
44448+ error = chown_common(dentry, user, group, file->f_path.mnt);
44449 mnt_drop_write(file->f_path.mnt);
44450 out_fput:
44451 fput(file);
44452@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44453 if (!IS_ERR(tmp)) {
44454 fd = get_unused_fd_flags(flags);
44455 if (fd >= 0) {
44456- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44457+ struct file *f;
44458+ /* don't allow to be set by userland */
44459+ flags &= ~FMODE_GREXEC;
44460+ f = do_filp_open(dfd, tmp, flags, mode, 0);
44461 if (IS_ERR(f)) {
44462 put_unused_fd(fd);
44463 fd = PTR_ERR(f);
44464diff -urNp linux-2.6.32.46/fs/partitions/ldm.c linux-2.6.32.46/fs/partitions/ldm.c
44465--- linux-2.6.32.46/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44466+++ linux-2.6.32.46/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44467@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44468 ldm_error ("A VBLK claims to have %d parts.", num);
44469 return false;
44470 }
44471+
44472 if (rec >= num) {
44473 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44474 return false;
44475@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44476 goto found;
44477 }
44478
44479- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44480+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44481 if (!f) {
44482 ldm_crit ("Out of memory.");
44483 return false;
44484diff -urNp linux-2.6.32.46/fs/partitions/mac.c linux-2.6.32.46/fs/partitions/mac.c
44485--- linux-2.6.32.46/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44486+++ linux-2.6.32.46/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44487@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44488 return 0; /* not a MacOS disk */
44489 }
44490 blocks_in_map = be32_to_cpu(part->map_count);
44491+ printk(" [mac]");
44492 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44493 put_dev_sector(sect);
44494 return 0;
44495 }
44496- printk(" [mac]");
44497 for (slot = 1; slot <= blocks_in_map; ++slot) {
44498 int pos = slot * secsize;
44499 put_dev_sector(sect);
44500diff -urNp linux-2.6.32.46/fs/pipe.c linux-2.6.32.46/fs/pipe.c
44501--- linux-2.6.32.46/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44502+++ linux-2.6.32.46/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44503@@ -401,9 +401,9 @@ redo:
44504 }
44505 if (bufs) /* More to do? */
44506 continue;
44507- if (!pipe->writers)
44508+ if (!atomic_read(&pipe->writers))
44509 break;
44510- if (!pipe->waiting_writers) {
44511+ if (!atomic_read(&pipe->waiting_writers)) {
44512 /* syscall merging: Usually we must not sleep
44513 * if O_NONBLOCK is set, or if we got some data.
44514 * But if a writer sleeps in kernel space, then
44515@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44516 mutex_lock(&inode->i_mutex);
44517 pipe = inode->i_pipe;
44518
44519- if (!pipe->readers) {
44520+ if (!atomic_read(&pipe->readers)) {
44521 send_sig(SIGPIPE, current, 0);
44522 ret = -EPIPE;
44523 goto out;
44524@@ -511,7 +511,7 @@ redo1:
44525 for (;;) {
44526 int bufs;
44527
44528- if (!pipe->readers) {
44529+ if (!atomic_read(&pipe->readers)) {
44530 send_sig(SIGPIPE, current, 0);
44531 if (!ret)
44532 ret = -EPIPE;
44533@@ -597,9 +597,9 @@ redo2:
44534 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44535 do_wakeup = 0;
44536 }
44537- pipe->waiting_writers++;
44538+ atomic_inc(&pipe->waiting_writers);
44539 pipe_wait(pipe);
44540- pipe->waiting_writers--;
44541+ atomic_dec(&pipe->waiting_writers);
44542 }
44543 out:
44544 mutex_unlock(&inode->i_mutex);
44545@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44546 mask = 0;
44547 if (filp->f_mode & FMODE_READ) {
44548 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44549- if (!pipe->writers && filp->f_version != pipe->w_counter)
44550+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44551 mask |= POLLHUP;
44552 }
44553
44554@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44555 * Most Unices do not set POLLERR for FIFOs but on Linux they
44556 * behave exactly like pipes for poll().
44557 */
44558- if (!pipe->readers)
44559+ if (!atomic_read(&pipe->readers))
44560 mask |= POLLERR;
44561 }
44562
44563@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
44564
44565 mutex_lock(&inode->i_mutex);
44566 pipe = inode->i_pipe;
44567- pipe->readers -= decr;
44568- pipe->writers -= decw;
44569+ atomic_sub(decr, &pipe->readers);
44570+ atomic_sub(decw, &pipe->writers);
44571
44572- if (!pipe->readers && !pipe->writers) {
44573+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
44574 free_pipe_info(inode);
44575 } else {
44576 wake_up_interruptible_sync(&pipe->wait);
44577@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
44578
44579 if (inode->i_pipe) {
44580 ret = 0;
44581- inode->i_pipe->readers++;
44582+ atomic_inc(&inode->i_pipe->readers);
44583 }
44584
44585 mutex_unlock(&inode->i_mutex);
44586@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
44587
44588 if (inode->i_pipe) {
44589 ret = 0;
44590- inode->i_pipe->writers++;
44591+ atomic_inc(&inode->i_pipe->writers);
44592 }
44593
44594 mutex_unlock(&inode->i_mutex);
44595@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
44596 if (inode->i_pipe) {
44597 ret = 0;
44598 if (filp->f_mode & FMODE_READ)
44599- inode->i_pipe->readers++;
44600+ atomic_inc(&inode->i_pipe->readers);
44601 if (filp->f_mode & FMODE_WRITE)
44602- inode->i_pipe->writers++;
44603+ atomic_inc(&inode->i_pipe->writers);
44604 }
44605
44606 mutex_unlock(&inode->i_mutex);
44607@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
44608 inode->i_pipe = NULL;
44609 }
44610
44611-static struct vfsmount *pipe_mnt __read_mostly;
44612+struct vfsmount *pipe_mnt __read_mostly;
44613 static int pipefs_delete_dentry(struct dentry *dentry)
44614 {
44615 /*
44616@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
44617 goto fail_iput;
44618 inode->i_pipe = pipe;
44619
44620- pipe->readers = pipe->writers = 1;
44621+ atomic_set(&pipe->readers, 1);
44622+ atomic_set(&pipe->writers, 1);
44623 inode->i_fop = &rdwr_pipefifo_fops;
44624
44625 /*
44626diff -urNp linux-2.6.32.46/fs/proc/array.c linux-2.6.32.46/fs/proc/array.c
44627--- linux-2.6.32.46/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
44628+++ linux-2.6.32.46/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
44629@@ -60,6 +60,7 @@
44630 #include <linux/tty.h>
44631 #include <linux/string.h>
44632 #include <linux/mman.h>
44633+#include <linux/grsecurity.h>
44634 #include <linux/proc_fs.h>
44635 #include <linux/ioport.h>
44636 #include <linux/uaccess.h>
44637@@ -321,6 +322,21 @@ static inline void task_context_switch_c
44638 p->nivcsw);
44639 }
44640
44641+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44642+static inline void task_pax(struct seq_file *m, struct task_struct *p)
44643+{
44644+ if (p->mm)
44645+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
44646+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
44647+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
44648+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
44649+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
44650+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
44651+ else
44652+ seq_printf(m, "PaX:\t-----\n");
44653+}
44654+#endif
44655+
44656 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
44657 struct pid *pid, struct task_struct *task)
44658 {
44659@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
44660 task_cap(m, task);
44661 cpuset_task_status_allowed(m, task);
44662 task_context_switch_counts(m, task);
44663+
44664+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
44665+ task_pax(m, task);
44666+#endif
44667+
44668+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
44669+ task_grsec_rbac(m, task);
44670+#endif
44671+
44672 return 0;
44673 }
44674
44675+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44676+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44677+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44678+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44679+#endif
44680+
44681 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
44682 struct pid *pid, struct task_struct *task, int whole)
44683 {
44684@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
44685 cputime_t cutime, cstime, utime, stime;
44686 cputime_t cgtime, gtime;
44687 unsigned long rsslim = 0;
44688- char tcomm[sizeof(task->comm)];
44689+ char tcomm[sizeof(task->comm)] = { 0 };
44690 unsigned long flags;
44691
44692+ pax_track_stack();
44693+
44694 state = *get_task_state(task);
44695 vsize = eip = esp = 0;
44696 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
44697@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
44698 gtime = task_gtime(task);
44699 }
44700
44701+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44702+ if (PAX_RAND_FLAGS(mm)) {
44703+ eip = 0;
44704+ esp = 0;
44705+ wchan = 0;
44706+ }
44707+#endif
44708+#ifdef CONFIG_GRKERNSEC_HIDESYM
44709+ wchan = 0;
44710+ eip =0;
44711+ esp =0;
44712+#endif
44713+
44714 /* scale priority and nice values from timeslices to -20..20 */
44715 /* to make it look like a "normal" Unix priority/nice value */
44716 priority = task_prio(task);
44717@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
44718 vsize,
44719 mm ? get_mm_rss(mm) : 0,
44720 rsslim,
44721+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44722+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
44723+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
44724+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
44725+#else
44726 mm ? (permitted ? mm->start_code : 1) : 0,
44727 mm ? (permitted ? mm->end_code : 1) : 0,
44728 (permitted && mm) ? mm->start_stack : 0,
44729+#endif
44730 esp,
44731 eip,
44732 /* The signal information here is obsolete.
44733@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
44734
44735 return 0;
44736 }
44737+
44738+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
44739+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
44740+{
44741+ u32 curr_ip = 0;
44742+ unsigned long flags;
44743+
44744+ if (lock_task_sighand(task, &flags)) {
44745+ curr_ip = task->signal->curr_ip;
44746+ unlock_task_sighand(task, &flags);
44747+ }
44748+
44749+ return sprintf(buffer, "%pI4\n", &curr_ip);
44750+}
44751+#endif
44752diff -urNp linux-2.6.32.46/fs/proc/base.c linux-2.6.32.46/fs/proc/base.c
44753--- linux-2.6.32.46/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
44754+++ linux-2.6.32.46/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
44755@@ -102,6 +102,22 @@ struct pid_entry {
44756 union proc_op op;
44757 };
44758
44759+struct getdents_callback {
44760+ struct linux_dirent __user * current_dir;
44761+ struct linux_dirent __user * previous;
44762+ struct file * file;
44763+ int count;
44764+ int error;
44765+};
44766+
44767+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
44768+ loff_t offset, u64 ino, unsigned int d_type)
44769+{
44770+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
44771+ buf->error = -EINVAL;
44772+ return 0;
44773+}
44774+
44775 #define NOD(NAME, MODE, IOP, FOP, OP) { \
44776 .name = (NAME), \
44777 .len = sizeof(NAME) - 1, \
44778@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
44779 if (task == current)
44780 return 0;
44781
44782+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
44783+ return -EPERM;
44784+
44785 /*
44786 * If current is actively ptrace'ing, and would also be
44787 * permitted to freshly attach with ptrace now, permit it.
44788@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
44789 if (!mm->arg_end)
44790 goto out_mm; /* Shh! No looking before we're done */
44791
44792+ if (gr_acl_handle_procpidmem(task))
44793+ goto out_mm;
44794+
44795 len = mm->arg_end - mm->arg_start;
44796
44797 if (len > PAGE_SIZE)
44798@@ -287,12 +309,28 @@ out:
44799 return res;
44800 }
44801
44802+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44803+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
44804+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
44805+ _mm->pax_flags & MF_PAX_SEGMEXEC))
44806+#endif
44807+
44808 static int proc_pid_auxv(struct task_struct *task, char *buffer)
44809 {
44810 int res = 0;
44811 struct mm_struct *mm = get_task_mm(task);
44812 if (mm) {
44813 unsigned int nwords = 0;
44814+
44815+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44816+ /* allow if we're currently ptracing this task */
44817+ if (PAX_RAND_FLAGS(mm) &&
44818+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
44819+ mmput(mm);
44820+ return res;
44821+ }
44822+#endif
44823+
44824 do {
44825 nwords += 2;
44826 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
44827@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
44828 }
44829
44830
44831-#ifdef CONFIG_KALLSYMS
44832+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44833 /*
44834 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
44835 * Returns the resolved symbol. If that fails, simply return the address.
44836@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
44837 }
44838 #endif /* CONFIG_KALLSYMS */
44839
44840-#ifdef CONFIG_STACKTRACE
44841+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
44842
44843 #define MAX_STACK_TRACE_DEPTH 64
44844
44845@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
44846 return count;
44847 }
44848
44849-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
44850+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44851 static int proc_pid_syscall(struct task_struct *task, char *buffer)
44852 {
44853 long nr;
44854@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
44855 /************************************************************************/
44856
44857 /* permission checks */
44858-static int proc_fd_access_allowed(struct inode *inode)
44859+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
44860 {
44861 struct task_struct *task;
44862 int allowed = 0;
44863@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
44864 */
44865 task = get_proc_task(inode);
44866 if (task) {
44867- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44868+ if (log)
44869+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
44870+ else
44871+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
44872 put_task_struct(task);
44873 }
44874 return allowed;
44875@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
44876 if (!task)
44877 goto out_no_task;
44878
44879+ if (gr_acl_handle_procpidmem(task))
44880+ goto out;
44881+
44882 if (!ptrace_may_access(task, PTRACE_MODE_READ))
44883 goto out;
44884
44885@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
44886 path_put(&nd->path);
44887
44888 /* Are we allowed to snoop on the tasks file descriptors? */
44889- if (!proc_fd_access_allowed(inode))
44890+ if (!proc_fd_access_allowed(inode,0))
44891 goto out;
44892
44893 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
44894@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
44895 struct path path;
44896
44897 /* Are we allowed to snoop on the tasks file descriptors? */
44898- if (!proc_fd_access_allowed(inode))
44899- goto out;
44900+ /* logging this is needed for learning on chromium to work properly,
44901+ but we don't want to flood the logs from 'ps' which does a readlink
44902+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
44903+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
44904+ */
44905+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
44906+ if (!proc_fd_access_allowed(inode,0))
44907+ goto out;
44908+ } else {
44909+ if (!proc_fd_access_allowed(inode,1))
44910+ goto out;
44911+ }
44912
44913 error = PROC_I(inode)->op.proc_get_link(inode, &path);
44914 if (error)
44915@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
44916 rcu_read_lock();
44917 cred = __task_cred(task);
44918 inode->i_uid = cred->euid;
44919+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44920+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44921+#else
44922 inode->i_gid = cred->egid;
44923+#endif
44924 rcu_read_unlock();
44925 }
44926 security_task_to_inode(task, inode);
44927@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
44928 struct inode *inode = dentry->d_inode;
44929 struct task_struct *task;
44930 const struct cred *cred;
44931+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44932+ const struct cred *tmpcred = current_cred();
44933+#endif
44934
44935 generic_fillattr(inode, stat);
44936
44937@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
44938 stat->uid = 0;
44939 stat->gid = 0;
44940 task = pid_task(proc_pid(inode), PIDTYPE_PID);
44941+
44942+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
44943+ rcu_read_unlock();
44944+ return -ENOENT;
44945+ }
44946+
44947 if (task) {
44948+ cred = __task_cred(task);
44949+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44950+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
44951+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44952+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
44953+#endif
44954+ ) {
44955+#endif
44956 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44957+#ifdef CONFIG_GRKERNSEC_PROC_USER
44958+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44959+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44960+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44961+#endif
44962 task_dumpable(task)) {
44963- cred = __task_cred(task);
44964 stat->uid = cred->euid;
44965+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44966+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
44967+#else
44968 stat->gid = cred->egid;
44969+#endif
44970 }
44971+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44972+ } else {
44973+ rcu_read_unlock();
44974+ return -ENOENT;
44975+ }
44976+#endif
44977 }
44978 rcu_read_unlock();
44979 return 0;
44980@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
44981
44982 if (task) {
44983 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
44984+#ifdef CONFIG_GRKERNSEC_PROC_USER
44985+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
44986+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
44987+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
44988+#endif
44989 task_dumpable(task)) {
44990 rcu_read_lock();
44991 cred = __task_cred(task);
44992 inode->i_uid = cred->euid;
44993+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
44994+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
44995+#else
44996 inode->i_gid = cred->egid;
44997+#endif
44998 rcu_read_unlock();
44999 } else {
45000 inode->i_uid = 0;
45001@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45002 int fd = proc_fd(inode);
45003
45004 if (task) {
45005- files = get_files_struct(task);
45006+ if (!gr_acl_handle_procpidmem(task))
45007+ files = get_files_struct(task);
45008 put_task_struct(task);
45009 }
45010 if (files) {
45011@@ -1895,12 +1994,22 @@ static const struct file_operations proc
45012 static int proc_fd_permission(struct inode *inode, int mask)
45013 {
45014 int rv;
45015+ struct task_struct *task;
45016
45017 rv = generic_permission(inode, mask, NULL);
45018- if (rv == 0)
45019- return 0;
45020+
45021 if (task_pid(current) == proc_pid(inode))
45022 rv = 0;
45023+
45024+ task = get_proc_task(inode);
45025+ if (task == NULL)
45026+ return rv;
45027+
45028+ if (gr_acl_handle_procpidmem(task))
45029+ rv = -EACCES;
45030+
45031+ put_task_struct(task);
45032+
45033 return rv;
45034 }
45035
45036@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45037 if (!task)
45038 goto out_no_task;
45039
45040+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45041+ goto out;
45042+
45043 /*
45044 * Yes, it does not scale. And it should not. Don't add
45045 * new entries into /proc/<tgid>/ without very good reasons.
45046@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45047 if (!task)
45048 goto out_no_task;
45049
45050+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45051+ goto out;
45052+
45053 ret = 0;
45054 i = filp->f_pos;
45055 switch (i) {
45056@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45057 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45058 void *cookie)
45059 {
45060- char *s = nd_get_link(nd);
45061+ const char *s = nd_get_link(nd);
45062 if (!IS_ERR(s))
45063 __putname(s);
45064 }
45065@@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45066 #ifdef CONFIG_SCHED_DEBUG
45067 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45068 #endif
45069-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45070+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45071 INF("syscall", S_IRUSR, proc_pid_syscall),
45072 #endif
45073 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45074@@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45075 #ifdef CONFIG_SECURITY
45076 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45077 #endif
45078-#ifdef CONFIG_KALLSYMS
45079+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45080 INF("wchan", S_IRUGO, proc_pid_wchan),
45081 #endif
45082-#ifdef CONFIG_STACKTRACE
45083+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45084 ONE("stack", S_IRUSR, proc_pid_stack),
45085 #endif
45086 #ifdef CONFIG_SCHEDSTATS
45087@@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45088 #ifdef CONFIG_TASK_IO_ACCOUNTING
45089 INF("io", S_IRUSR, proc_tgid_io_accounting),
45090 #endif
45091+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45092+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45093+#endif
45094 };
45095
45096 static int proc_tgid_base_readdir(struct file * filp,
45097@@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45098 if (!inode)
45099 goto out;
45100
45101+#ifdef CONFIG_GRKERNSEC_PROC_USER
45102+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45103+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45104+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45105+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45106+#else
45107 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45108+#endif
45109 inode->i_op = &proc_tgid_base_inode_operations;
45110 inode->i_fop = &proc_tgid_base_operations;
45111 inode->i_flags|=S_IMMUTABLE;
45112@@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45113 if (!task)
45114 goto out;
45115
45116+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45117+ goto out_put_task;
45118+
45119 result = proc_pid_instantiate(dir, dentry, task, NULL);
45120+out_put_task:
45121 put_task_struct(task);
45122 out:
45123 return result;
45124@@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45125 {
45126 unsigned int nr;
45127 struct task_struct *reaper;
45128+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45129+ const struct cred *tmpcred = current_cred();
45130+ const struct cred *itercred;
45131+#endif
45132+ filldir_t __filldir = filldir;
45133 struct tgid_iter iter;
45134 struct pid_namespace *ns;
45135
45136@@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45137 for (iter = next_tgid(ns, iter);
45138 iter.task;
45139 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45140+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45141+ rcu_read_lock();
45142+ itercred = __task_cred(iter.task);
45143+#endif
45144+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45145+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45146+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45147+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45148+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45149+#endif
45150+ )
45151+#endif
45152+ )
45153+ __filldir = &gr_fake_filldir;
45154+ else
45155+ __filldir = filldir;
45156+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45157+ rcu_read_unlock();
45158+#endif
45159 filp->f_pos = iter.tgid + TGID_OFFSET;
45160- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45161+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45162 put_task_struct(iter.task);
45163 goto out;
45164 }
45165@@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45166 #ifdef CONFIG_SCHED_DEBUG
45167 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45168 #endif
45169-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45170+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45171 INF("syscall", S_IRUSR, proc_pid_syscall),
45172 #endif
45173 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45174@@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45175 #ifdef CONFIG_SECURITY
45176 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45177 #endif
45178-#ifdef CONFIG_KALLSYMS
45179+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45180 INF("wchan", S_IRUGO, proc_pid_wchan),
45181 #endif
45182-#ifdef CONFIG_STACKTRACE
45183+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45184 ONE("stack", S_IRUSR, proc_pid_stack),
45185 #endif
45186 #ifdef CONFIG_SCHEDSTATS
45187diff -urNp linux-2.6.32.46/fs/proc/cmdline.c linux-2.6.32.46/fs/proc/cmdline.c
45188--- linux-2.6.32.46/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45189+++ linux-2.6.32.46/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45190@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45191
45192 static int __init proc_cmdline_init(void)
45193 {
45194+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45195+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45196+#else
45197 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45198+#endif
45199 return 0;
45200 }
45201 module_init(proc_cmdline_init);
45202diff -urNp linux-2.6.32.46/fs/proc/devices.c linux-2.6.32.46/fs/proc/devices.c
45203--- linux-2.6.32.46/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45204+++ linux-2.6.32.46/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45205@@ -64,7 +64,11 @@ static const struct file_operations proc
45206
45207 static int __init proc_devices_init(void)
45208 {
45209+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45210+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45211+#else
45212 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45213+#endif
45214 return 0;
45215 }
45216 module_init(proc_devices_init);
45217diff -urNp linux-2.6.32.46/fs/proc/inode.c linux-2.6.32.46/fs/proc/inode.c
45218--- linux-2.6.32.46/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45219+++ linux-2.6.32.46/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45220@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45221 if (de->mode) {
45222 inode->i_mode = de->mode;
45223 inode->i_uid = de->uid;
45224+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45225+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45226+#else
45227 inode->i_gid = de->gid;
45228+#endif
45229 }
45230 if (de->size)
45231 inode->i_size = de->size;
45232diff -urNp linux-2.6.32.46/fs/proc/internal.h linux-2.6.32.46/fs/proc/internal.h
45233--- linux-2.6.32.46/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45234+++ linux-2.6.32.46/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45235@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45236 struct pid *pid, struct task_struct *task);
45237 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45238 struct pid *pid, struct task_struct *task);
45239+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45240+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45241+#endif
45242 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45243
45244 extern const struct file_operations proc_maps_operations;
45245diff -urNp linux-2.6.32.46/fs/proc/Kconfig linux-2.6.32.46/fs/proc/Kconfig
45246--- linux-2.6.32.46/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45247+++ linux-2.6.32.46/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45248@@ -30,12 +30,12 @@ config PROC_FS
45249
45250 config PROC_KCORE
45251 bool "/proc/kcore support" if !ARM
45252- depends on PROC_FS && MMU
45253+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45254
45255 config PROC_VMCORE
45256 bool "/proc/vmcore support (EXPERIMENTAL)"
45257- depends on PROC_FS && CRASH_DUMP
45258- default y
45259+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45260+ default n
45261 help
45262 Exports the dump image of crashed kernel in ELF format.
45263
45264@@ -59,8 +59,8 @@ config PROC_SYSCTL
45265 limited in memory.
45266
45267 config PROC_PAGE_MONITOR
45268- default y
45269- depends on PROC_FS && MMU
45270+ default n
45271+ depends on PROC_FS && MMU && !GRKERNSEC
45272 bool "Enable /proc page monitoring" if EMBEDDED
45273 help
45274 Various /proc files exist to monitor process memory utilization:
45275diff -urNp linux-2.6.32.46/fs/proc/kcore.c linux-2.6.32.46/fs/proc/kcore.c
45276--- linux-2.6.32.46/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45277+++ linux-2.6.32.46/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45278@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45279 off_t offset = 0;
45280 struct kcore_list *m;
45281
45282+ pax_track_stack();
45283+
45284 /* setup ELF header */
45285 elf = (struct elfhdr *) bufp;
45286 bufp += sizeof(struct elfhdr);
45287@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45288 * the addresses in the elf_phdr on our list.
45289 */
45290 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45291- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45292+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45293+ if (tsz > buflen)
45294 tsz = buflen;
45295-
45296+
45297 while (buflen) {
45298 struct kcore_list *m;
45299
45300@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45301 kfree(elf_buf);
45302 } else {
45303 if (kern_addr_valid(start)) {
45304- unsigned long n;
45305+ char *elf_buf;
45306+ mm_segment_t oldfs;
45307
45308- n = copy_to_user(buffer, (char *)start, tsz);
45309- /*
45310- * We cannot distingush between fault on source
45311- * and fault on destination. When this happens
45312- * we clear too and hope it will trigger the
45313- * EFAULT again.
45314- */
45315- if (n) {
45316- if (clear_user(buffer + tsz - n,
45317- n))
45318+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45319+ if (!elf_buf)
45320+ return -ENOMEM;
45321+ oldfs = get_fs();
45322+ set_fs(KERNEL_DS);
45323+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45324+ set_fs(oldfs);
45325+ if (copy_to_user(buffer, elf_buf, tsz)) {
45326+ kfree(elf_buf);
45327 return -EFAULT;
45328+ }
45329 }
45330+ set_fs(oldfs);
45331+ kfree(elf_buf);
45332 } else {
45333 if (clear_user(buffer, tsz))
45334 return -EFAULT;
45335@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45336
45337 static int open_kcore(struct inode *inode, struct file *filp)
45338 {
45339+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45340+ return -EPERM;
45341+#endif
45342 if (!capable(CAP_SYS_RAWIO))
45343 return -EPERM;
45344 if (kcore_need_update)
45345diff -urNp linux-2.6.32.46/fs/proc/meminfo.c linux-2.6.32.46/fs/proc/meminfo.c
45346--- linux-2.6.32.46/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45347+++ linux-2.6.32.46/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45348@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45349 unsigned long pages[NR_LRU_LISTS];
45350 int lru;
45351
45352+ pax_track_stack();
45353+
45354 /*
45355 * display in kilobytes.
45356 */
45357@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45358 vmi.used >> 10,
45359 vmi.largest_chunk >> 10
45360 #ifdef CONFIG_MEMORY_FAILURE
45361- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45362+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45363 #endif
45364 );
45365
45366diff -urNp linux-2.6.32.46/fs/proc/nommu.c linux-2.6.32.46/fs/proc/nommu.c
45367--- linux-2.6.32.46/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45368+++ linux-2.6.32.46/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45369@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45370 if (len < 1)
45371 len = 1;
45372 seq_printf(m, "%*c", len, ' ');
45373- seq_path(m, &file->f_path, "");
45374+ seq_path(m, &file->f_path, "\n\\");
45375 }
45376
45377 seq_putc(m, '\n');
45378diff -urNp linux-2.6.32.46/fs/proc/proc_net.c linux-2.6.32.46/fs/proc/proc_net.c
45379--- linux-2.6.32.46/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45380+++ linux-2.6.32.46/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45381@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45382 struct task_struct *task;
45383 struct nsproxy *ns;
45384 struct net *net = NULL;
45385+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45386+ const struct cred *cred = current_cred();
45387+#endif
45388+
45389+#ifdef CONFIG_GRKERNSEC_PROC_USER
45390+ if (cred->fsuid)
45391+ return net;
45392+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45393+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45394+ return net;
45395+#endif
45396
45397 rcu_read_lock();
45398 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45399diff -urNp linux-2.6.32.46/fs/proc/proc_sysctl.c linux-2.6.32.46/fs/proc/proc_sysctl.c
45400--- linux-2.6.32.46/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45401+++ linux-2.6.32.46/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45402@@ -7,6 +7,8 @@
45403 #include <linux/security.h>
45404 #include "internal.h"
45405
45406+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45407+
45408 static const struct dentry_operations proc_sys_dentry_operations;
45409 static const struct file_operations proc_sys_file_operations;
45410 static const struct inode_operations proc_sys_inode_operations;
45411@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45412 if (!p)
45413 goto out;
45414
45415+ if (gr_handle_sysctl(p, MAY_EXEC))
45416+ goto out;
45417+
45418 err = ERR_PTR(-ENOMEM);
45419 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45420 if (h)
45421@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45422 if (*pos < file->f_pos)
45423 continue;
45424
45425+ if (gr_handle_sysctl(table, 0))
45426+ continue;
45427+
45428 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45429 if (res)
45430 return res;
45431@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45432 if (IS_ERR(head))
45433 return PTR_ERR(head);
45434
45435+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45436+ return -ENOENT;
45437+
45438 generic_fillattr(inode, stat);
45439 if (table)
45440 stat->mode = (stat->mode & S_IFMT) | table->mode;
45441diff -urNp linux-2.6.32.46/fs/proc/root.c linux-2.6.32.46/fs/proc/root.c
45442--- linux-2.6.32.46/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45443+++ linux-2.6.32.46/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45444@@ -134,7 +134,15 @@ void __init proc_root_init(void)
45445 #ifdef CONFIG_PROC_DEVICETREE
45446 proc_device_tree_init();
45447 #endif
45448+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45449+#ifdef CONFIG_GRKERNSEC_PROC_USER
45450+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45451+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45452+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45453+#endif
45454+#else
45455 proc_mkdir("bus", NULL);
45456+#endif
45457 proc_sys_init();
45458 }
45459
45460diff -urNp linux-2.6.32.46/fs/proc/task_mmu.c linux-2.6.32.46/fs/proc/task_mmu.c
45461--- linux-2.6.32.46/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45462+++ linux-2.6.32.46/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45463@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45464 "VmStk:\t%8lu kB\n"
45465 "VmExe:\t%8lu kB\n"
45466 "VmLib:\t%8lu kB\n"
45467- "VmPTE:\t%8lu kB\n",
45468- hiwater_vm << (PAGE_SHIFT-10),
45469+ "VmPTE:\t%8lu kB\n"
45470+
45471+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45472+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45473+#endif
45474+
45475+ ,hiwater_vm << (PAGE_SHIFT-10),
45476 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45477 mm->locked_vm << (PAGE_SHIFT-10),
45478 hiwater_rss << (PAGE_SHIFT-10),
45479 total_rss << (PAGE_SHIFT-10),
45480 data << (PAGE_SHIFT-10),
45481 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45482- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45483+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45484+
45485+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45486+ , mm->context.user_cs_base, mm->context.user_cs_limit
45487+#endif
45488+
45489+ );
45490 }
45491
45492 unsigned long task_vsize(struct mm_struct *mm)
45493@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45494 struct proc_maps_private *priv = m->private;
45495 struct vm_area_struct *vma = v;
45496
45497- vma_stop(priv, vma);
45498+ if (!IS_ERR(vma))
45499+ vma_stop(priv, vma);
45500 if (priv->task)
45501 put_task_struct(priv->task);
45502 }
45503@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45504 return ret;
45505 }
45506
45507+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45508+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45509+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45510+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45511+#endif
45512+
45513 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45514 {
45515 struct mm_struct *mm = vma->vm_mm;
45516@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45517 int flags = vma->vm_flags;
45518 unsigned long ino = 0;
45519 unsigned long long pgoff = 0;
45520- unsigned long start;
45521 dev_t dev = 0;
45522 int len;
45523
45524@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45525 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45526 }
45527
45528- /* We don't show the stack guard page in /proc/maps */
45529- start = vma->vm_start;
45530- if (vma->vm_flags & VM_GROWSDOWN)
45531- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45532- start += PAGE_SIZE;
45533-
45534 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45535- start,
45536+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45537+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45538+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45539+#else
45540+ vma->vm_start,
45541 vma->vm_end,
45542+#endif
45543 flags & VM_READ ? 'r' : '-',
45544 flags & VM_WRITE ? 'w' : '-',
45545 flags & VM_EXEC ? 'x' : '-',
45546 flags & VM_MAYSHARE ? 's' : 'p',
45547+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45548+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45549+#else
45550 pgoff,
45551+#endif
45552 MAJOR(dev), MINOR(dev), ino, &len);
45553
45554 /*
45555@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45556 */
45557 if (file) {
45558 pad_len_spaces(m, len);
45559- seq_path(m, &file->f_path, "\n");
45560+ seq_path(m, &file->f_path, "\n\\");
45561 } else {
45562 const char *name = arch_vma_name(vma);
45563 if (!name) {
45564@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
45565 if (vma->vm_start <= mm->brk &&
45566 vma->vm_end >= mm->start_brk) {
45567 name = "[heap]";
45568- } else if (vma->vm_start <= mm->start_stack &&
45569- vma->vm_end >= mm->start_stack) {
45570+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
45571+ (vma->vm_start <= mm->start_stack &&
45572+ vma->vm_end >= mm->start_stack)) {
45573 name = "[stack]";
45574 }
45575 } else {
45576@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
45577 };
45578
45579 memset(&mss, 0, sizeof mss);
45580- mss.vma = vma;
45581- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45582- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45583+
45584+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45585+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
45586+#endif
45587+ mss.vma = vma;
45588+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
45589+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
45590+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45591+ }
45592+#endif
45593
45594 show_map_vma(m, vma);
45595
45596@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
45597 "Swap: %8lu kB\n"
45598 "KernelPageSize: %8lu kB\n"
45599 "MMUPageSize: %8lu kB\n",
45600+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45601+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
45602+#else
45603 (vma->vm_end - vma->vm_start) >> 10,
45604+#endif
45605 mss.resident >> 10,
45606 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
45607 mss.shared_clean >> 10,
45608diff -urNp linux-2.6.32.46/fs/proc/task_nommu.c linux-2.6.32.46/fs/proc/task_nommu.c
45609--- linux-2.6.32.46/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
45610+++ linux-2.6.32.46/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
45611@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
45612 else
45613 bytes += kobjsize(mm);
45614
45615- if (current->fs && current->fs->users > 1)
45616+ if (current->fs && atomic_read(&current->fs->users) > 1)
45617 sbytes += kobjsize(current->fs);
45618 else
45619 bytes += kobjsize(current->fs);
45620@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
45621 if (len < 1)
45622 len = 1;
45623 seq_printf(m, "%*c", len, ' ');
45624- seq_path(m, &file->f_path, "");
45625+ seq_path(m, &file->f_path, "\n\\");
45626 }
45627
45628 seq_putc(m, '\n');
45629diff -urNp linux-2.6.32.46/fs/readdir.c linux-2.6.32.46/fs/readdir.c
45630--- linux-2.6.32.46/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
45631+++ linux-2.6.32.46/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
45632@@ -16,6 +16,7 @@
45633 #include <linux/security.h>
45634 #include <linux/syscalls.h>
45635 #include <linux/unistd.h>
45636+#include <linux/namei.h>
45637
45638 #include <asm/uaccess.h>
45639
45640@@ -67,6 +68,7 @@ struct old_linux_dirent {
45641
45642 struct readdir_callback {
45643 struct old_linux_dirent __user * dirent;
45644+ struct file * file;
45645 int result;
45646 };
45647
45648@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
45649 buf->result = -EOVERFLOW;
45650 return -EOVERFLOW;
45651 }
45652+
45653+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45654+ return 0;
45655+
45656 buf->result++;
45657 dirent = buf->dirent;
45658 if (!access_ok(VERIFY_WRITE, dirent,
45659@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
45660
45661 buf.result = 0;
45662 buf.dirent = dirent;
45663+ buf.file = file;
45664
45665 error = vfs_readdir(file, fillonedir, &buf);
45666 if (buf.result)
45667@@ -142,6 +149,7 @@ struct linux_dirent {
45668 struct getdents_callback {
45669 struct linux_dirent __user * current_dir;
45670 struct linux_dirent __user * previous;
45671+ struct file * file;
45672 int count;
45673 int error;
45674 };
45675@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
45676 buf->error = -EOVERFLOW;
45677 return -EOVERFLOW;
45678 }
45679+
45680+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45681+ return 0;
45682+
45683 dirent = buf->previous;
45684 if (dirent) {
45685 if (__put_user(offset, &dirent->d_off))
45686@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
45687 buf.previous = NULL;
45688 buf.count = count;
45689 buf.error = 0;
45690+ buf.file = file;
45691
45692 error = vfs_readdir(file, filldir, &buf);
45693 if (error >= 0)
45694@@ -228,6 +241,7 @@ out:
45695 struct getdents_callback64 {
45696 struct linux_dirent64 __user * current_dir;
45697 struct linux_dirent64 __user * previous;
45698+ struct file *file;
45699 int count;
45700 int error;
45701 };
45702@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
45703 buf->error = -EINVAL; /* only used if we fail.. */
45704 if (reclen > buf->count)
45705 return -EINVAL;
45706+
45707+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
45708+ return 0;
45709+
45710 dirent = buf->previous;
45711 if (dirent) {
45712 if (__put_user(offset, &dirent->d_off))
45713@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
45714
45715 buf.current_dir = dirent;
45716 buf.previous = NULL;
45717+ buf.file = file;
45718 buf.count = count;
45719 buf.error = 0;
45720
45721diff -urNp linux-2.6.32.46/fs/reiserfs/dir.c linux-2.6.32.46/fs/reiserfs/dir.c
45722--- linux-2.6.32.46/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
45723+++ linux-2.6.32.46/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
45724@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
45725 struct reiserfs_dir_entry de;
45726 int ret = 0;
45727
45728+ pax_track_stack();
45729+
45730 reiserfs_write_lock(inode->i_sb);
45731
45732 reiserfs_check_lock_depth(inode->i_sb, "readdir");
45733diff -urNp linux-2.6.32.46/fs/reiserfs/do_balan.c linux-2.6.32.46/fs/reiserfs/do_balan.c
45734--- linux-2.6.32.46/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
45735+++ linux-2.6.32.46/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
45736@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
45737 return;
45738 }
45739
45740- atomic_inc(&(fs_generation(tb->tb_sb)));
45741+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
45742 do_balance_starts(tb);
45743
45744 /* balance leaf returns 0 except if combining L R and S into
45745diff -urNp linux-2.6.32.46/fs/reiserfs/item_ops.c linux-2.6.32.46/fs/reiserfs/item_ops.c
45746--- linux-2.6.32.46/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
45747+++ linux-2.6.32.46/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
45748@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
45749 vi->vi_index, vi->vi_type, vi->vi_ih);
45750 }
45751
45752-static struct item_operations stat_data_ops = {
45753+static const struct item_operations stat_data_ops = {
45754 .bytes_number = sd_bytes_number,
45755 .decrement_key = sd_decrement_key,
45756 .is_left_mergeable = sd_is_left_mergeable,
45757@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
45758 vi->vi_index, vi->vi_type, vi->vi_ih);
45759 }
45760
45761-static struct item_operations direct_ops = {
45762+static const struct item_operations direct_ops = {
45763 .bytes_number = direct_bytes_number,
45764 .decrement_key = direct_decrement_key,
45765 .is_left_mergeable = direct_is_left_mergeable,
45766@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
45767 vi->vi_index, vi->vi_type, vi->vi_ih);
45768 }
45769
45770-static struct item_operations indirect_ops = {
45771+static const struct item_operations indirect_ops = {
45772 .bytes_number = indirect_bytes_number,
45773 .decrement_key = indirect_decrement_key,
45774 .is_left_mergeable = indirect_is_left_mergeable,
45775@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
45776 printk("\n");
45777 }
45778
45779-static struct item_operations direntry_ops = {
45780+static const struct item_operations direntry_ops = {
45781 .bytes_number = direntry_bytes_number,
45782 .decrement_key = direntry_decrement_key,
45783 .is_left_mergeable = direntry_is_left_mergeable,
45784@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
45785 "Invalid item type observed, run fsck ASAP");
45786 }
45787
45788-static struct item_operations errcatch_ops = {
45789+static const struct item_operations errcatch_ops = {
45790 errcatch_bytes_number,
45791 errcatch_decrement_key,
45792 errcatch_is_left_mergeable,
45793@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
45794 #error Item types must use disk-format assigned values.
45795 #endif
45796
45797-struct item_operations *item_ops[TYPE_ANY + 1] = {
45798+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
45799 &stat_data_ops,
45800 &indirect_ops,
45801 &direct_ops,
45802diff -urNp linux-2.6.32.46/fs/reiserfs/journal.c linux-2.6.32.46/fs/reiserfs/journal.c
45803--- linux-2.6.32.46/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
45804+++ linux-2.6.32.46/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
45805@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
45806 struct buffer_head *bh;
45807 int i, j;
45808
45809+ pax_track_stack();
45810+
45811 bh = __getblk(dev, block, bufsize);
45812 if (buffer_uptodate(bh))
45813 return (bh);
45814diff -urNp linux-2.6.32.46/fs/reiserfs/namei.c linux-2.6.32.46/fs/reiserfs/namei.c
45815--- linux-2.6.32.46/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
45816+++ linux-2.6.32.46/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
45817@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
45818 unsigned long savelink = 1;
45819 struct timespec ctime;
45820
45821+ pax_track_stack();
45822+
45823 /* three balancings: (1) old name removal, (2) new name insertion
45824 and (3) maybe "save" link insertion
45825 stat data updates: (1) old directory,
45826diff -urNp linux-2.6.32.46/fs/reiserfs/procfs.c linux-2.6.32.46/fs/reiserfs/procfs.c
45827--- linux-2.6.32.46/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
45828+++ linux-2.6.32.46/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
45829@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
45830 "SMALL_TAILS " : "NO_TAILS ",
45831 replay_only(sb) ? "REPLAY_ONLY " : "",
45832 convert_reiserfs(sb) ? "CONV " : "",
45833- atomic_read(&r->s_generation_counter),
45834+ atomic_read_unchecked(&r->s_generation_counter),
45835 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
45836 SF(s_do_balance), SF(s_unneeded_left_neighbor),
45837 SF(s_good_search_by_key_reada), SF(s_bmaps),
45838@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
45839 struct journal_params *jp = &rs->s_v1.s_journal;
45840 char b[BDEVNAME_SIZE];
45841
45842+ pax_track_stack();
45843+
45844 seq_printf(m, /* on-disk fields */
45845 "jp_journal_1st_block: \t%i\n"
45846 "jp_journal_dev: \t%s[%x]\n"
45847diff -urNp linux-2.6.32.46/fs/reiserfs/stree.c linux-2.6.32.46/fs/reiserfs/stree.c
45848--- linux-2.6.32.46/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
45849+++ linux-2.6.32.46/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
45850@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
45851 int iter = 0;
45852 #endif
45853
45854+ pax_track_stack();
45855+
45856 BUG_ON(!th->t_trans_id);
45857
45858 init_tb_struct(th, &s_del_balance, sb, path,
45859@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
45860 int retval;
45861 int quota_cut_bytes = 0;
45862
45863+ pax_track_stack();
45864+
45865 BUG_ON(!th->t_trans_id);
45866
45867 le_key2cpu_key(&cpu_key, key);
45868@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
45869 int quota_cut_bytes;
45870 loff_t tail_pos = 0;
45871
45872+ pax_track_stack();
45873+
45874 BUG_ON(!th->t_trans_id);
45875
45876 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
45877@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
45878 int retval;
45879 int fs_gen;
45880
45881+ pax_track_stack();
45882+
45883 BUG_ON(!th->t_trans_id);
45884
45885 fs_gen = get_generation(inode->i_sb);
45886@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
45887 int fs_gen = 0;
45888 int quota_bytes = 0;
45889
45890+ pax_track_stack();
45891+
45892 BUG_ON(!th->t_trans_id);
45893
45894 if (inode) { /* Do we count quotas for item? */
45895diff -urNp linux-2.6.32.46/fs/reiserfs/super.c linux-2.6.32.46/fs/reiserfs/super.c
45896--- linux-2.6.32.46/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
45897+++ linux-2.6.32.46/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
45898@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
45899 {.option_name = NULL}
45900 };
45901
45902+ pax_track_stack();
45903+
45904 *blocks = 0;
45905 if (!options || !*options)
45906 /* use default configuration: create tails, journaling on, no
45907diff -urNp linux-2.6.32.46/fs/select.c linux-2.6.32.46/fs/select.c
45908--- linux-2.6.32.46/fs/select.c 2011-03-27 14:31:47.000000000 -0400
45909+++ linux-2.6.32.46/fs/select.c 2011-05-16 21:46:57.000000000 -0400
45910@@ -20,6 +20,7 @@
45911 #include <linux/module.h>
45912 #include <linux/slab.h>
45913 #include <linux/poll.h>
45914+#include <linux/security.h>
45915 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
45916 #include <linux/file.h>
45917 #include <linux/fdtable.h>
45918@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
45919 int retval, i, timed_out = 0;
45920 unsigned long slack = 0;
45921
45922+ pax_track_stack();
45923+
45924 rcu_read_lock();
45925 retval = max_select_fd(n, fds);
45926 rcu_read_unlock();
45927@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
45928 /* Allocate small arguments on the stack to save memory and be faster */
45929 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
45930
45931+ pax_track_stack();
45932+
45933 ret = -EINVAL;
45934 if (n < 0)
45935 goto out_nofds;
45936@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
45937 struct poll_list *walk = head;
45938 unsigned long todo = nfds;
45939
45940+ pax_track_stack();
45941+
45942+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
45943 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
45944 return -EINVAL;
45945
45946diff -urNp linux-2.6.32.46/fs/seq_file.c linux-2.6.32.46/fs/seq_file.c
45947--- linux-2.6.32.46/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
45948+++ linux-2.6.32.46/fs/seq_file.c 2011-08-23 21:22:32.000000000 -0400
45949@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
45950 return 0;
45951 }
45952 if (!m->buf) {
45953- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45954+ m->size = PAGE_SIZE;
45955+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45956 if (!m->buf)
45957 return -ENOMEM;
45958 }
45959@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
45960 Eoverflow:
45961 m->op->stop(m, p);
45962 kfree(m->buf);
45963- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45964+ m->size <<= 1;
45965+ m->buf = kmalloc(m->size, GFP_KERNEL);
45966 return !m->buf ? -ENOMEM : -EAGAIN;
45967 }
45968
45969@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
45970 m->version = file->f_version;
45971 /* grab buffer if we didn't have one */
45972 if (!m->buf) {
45973- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
45974+ m->size = PAGE_SIZE;
45975+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
45976 if (!m->buf)
45977 goto Enomem;
45978 }
45979@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
45980 goto Fill;
45981 m->op->stop(m, p);
45982 kfree(m->buf);
45983- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
45984+ m->size <<= 1;
45985+ m->buf = kmalloc(m->size, GFP_KERNEL);
45986 if (!m->buf)
45987 goto Enomem;
45988 m->count = 0;
45989@@ -551,7 +555,7 @@ static void single_stop(struct seq_file
45990 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
45991 void *data)
45992 {
45993- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
45994+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
45995 int res = -ENOMEM;
45996
45997 if (op) {
45998diff -urNp linux-2.6.32.46/fs/smbfs/proc.c linux-2.6.32.46/fs/smbfs/proc.c
45999--- linux-2.6.32.46/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46000+++ linux-2.6.32.46/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46001@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46002
46003 out:
46004 if (server->local_nls != NULL && server->remote_nls != NULL)
46005- server->ops->convert = convert_cp;
46006+ *(void **)&server->ops->convert = convert_cp;
46007 else
46008- server->ops->convert = convert_memcpy;
46009+ *(void **)&server->ops->convert = convert_memcpy;
46010
46011 smb_unlock_server(server);
46012 return n;
46013@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46014
46015 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46016 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46017- server->ops->getattr = smb_proc_getattr_core;
46018+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
46019 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46020- server->ops->getattr = smb_proc_getattr_ff;
46021+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46022 }
46023
46024 /* Decode server capabilities */
46025@@ -3439,7 +3439,7 @@ out:
46026 static void
46027 install_ops(struct smb_ops *dst, struct smb_ops *src)
46028 {
46029- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46030+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46031 }
46032
46033 /* < LANMAN2 */
46034diff -urNp linux-2.6.32.46/fs/smbfs/symlink.c linux-2.6.32.46/fs/smbfs/symlink.c
46035--- linux-2.6.32.46/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46036+++ linux-2.6.32.46/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46037@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46038
46039 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46040 {
46041- char *s = nd_get_link(nd);
46042+ const char *s = nd_get_link(nd);
46043 if (!IS_ERR(s))
46044 __putname(s);
46045 }
46046diff -urNp linux-2.6.32.46/fs/splice.c linux-2.6.32.46/fs/splice.c
46047--- linux-2.6.32.46/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46048+++ linux-2.6.32.46/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46049@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46050 pipe_lock(pipe);
46051
46052 for (;;) {
46053- if (!pipe->readers) {
46054+ if (!atomic_read(&pipe->readers)) {
46055 send_sig(SIGPIPE, current, 0);
46056 if (!ret)
46057 ret = -EPIPE;
46058@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46059 do_wakeup = 0;
46060 }
46061
46062- pipe->waiting_writers++;
46063+ atomic_inc(&pipe->waiting_writers);
46064 pipe_wait(pipe);
46065- pipe->waiting_writers--;
46066+ atomic_dec(&pipe->waiting_writers);
46067 }
46068
46069 pipe_unlock(pipe);
46070@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46071 .spd_release = spd_release_page,
46072 };
46073
46074+ pax_track_stack();
46075+
46076 index = *ppos >> PAGE_CACHE_SHIFT;
46077 loff = *ppos & ~PAGE_CACHE_MASK;
46078 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46079@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46080 old_fs = get_fs();
46081 set_fs(get_ds());
46082 /* The cast to a user pointer is valid due to the set_fs() */
46083- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46084+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46085 set_fs(old_fs);
46086
46087 return res;
46088@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46089 old_fs = get_fs();
46090 set_fs(get_ds());
46091 /* The cast to a user pointer is valid due to the set_fs() */
46092- res = vfs_write(file, (const char __user *)buf, count, &pos);
46093+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46094 set_fs(old_fs);
46095
46096 return res;
46097@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46098 .spd_release = spd_release_page,
46099 };
46100
46101+ pax_track_stack();
46102+
46103 index = *ppos >> PAGE_CACHE_SHIFT;
46104 offset = *ppos & ~PAGE_CACHE_MASK;
46105 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46106@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46107 goto err;
46108
46109 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46110- vec[i].iov_base = (void __user *) page_address(page);
46111+ vec[i].iov_base = (__force void __user *) page_address(page);
46112 vec[i].iov_len = this_len;
46113 pages[i] = page;
46114 spd.nr_pages++;
46115@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46116 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46117 {
46118 while (!pipe->nrbufs) {
46119- if (!pipe->writers)
46120+ if (!atomic_read(&pipe->writers))
46121 return 0;
46122
46123- if (!pipe->waiting_writers && sd->num_spliced)
46124+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46125 return 0;
46126
46127 if (sd->flags & SPLICE_F_NONBLOCK)
46128@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46129 * out of the pipe right after the splice_to_pipe(). So set
46130 * PIPE_READERS appropriately.
46131 */
46132- pipe->readers = 1;
46133+ atomic_set(&pipe->readers, 1);
46134
46135 current->splice_pipe = pipe;
46136 }
46137@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46138 .spd_release = spd_release_page,
46139 };
46140
46141+ pax_track_stack();
46142+
46143 pipe = pipe_info(file->f_path.dentry->d_inode);
46144 if (!pipe)
46145 return -EBADF;
46146@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46147 ret = -ERESTARTSYS;
46148 break;
46149 }
46150- if (!pipe->writers)
46151+ if (!atomic_read(&pipe->writers))
46152 break;
46153- if (!pipe->waiting_writers) {
46154+ if (!atomic_read(&pipe->waiting_writers)) {
46155 if (flags & SPLICE_F_NONBLOCK) {
46156 ret = -EAGAIN;
46157 break;
46158@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46159 pipe_lock(pipe);
46160
46161 while (pipe->nrbufs >= PIPE_BUFFERS) {
46162- if (!pipe->readers) {
46163+ if (!atomic_read(&pipe->readers)) {
46164 send_sig(SIGPIPE, current, 0);
46165 ret = -EPIPE;
46166 break;
46167@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46168 ret = -ERESTARTSYS;
46169 break;
46170 }
46171- pipe->waiting_writers++;
46172+ atomic_inc(&pipe->waiting_writers);
46173 pipe_wait(pipe);
46174- pipe->waiting_writers--;
46175+ atomic_dec(&pipe->waiting_writers);
46176 }
46177
46178 pipe_unlock(pipe);
46179@@ -1785,14 +1791,14 @@ retry:
46180 pipe_double_lock(ipipe, opipe);
46181
46182 do {
46183- if (!opipe->readers) {
46184+ if (!atomic_read(&opipe->readers)) {
46185 send_sig(SIGPIPE, current, 0);
46186 if (!ret)
46187 ret = -EPIPE;
46188 break;
46189 }
46190
46191- if (!ipipe->nrbufs && !ipipe->writers)
46192+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46193 break;
46194
46195 /*
46196@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46197 pipe_double_lock(ipipe, opipe);
46198
46199 do {
46200- if (!opipe->readers) {
46201+ if (!atomic_read(&opipe->readers)) {
46202 send_sig(SIGPIPE, current, 0);
46203 if (!ret)
46204 ret = -EPIPE;
46205@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46206 * return EAGAIN if we have the potential of some data in the
46207 * future, otherwise just return 0
46208 */
46209- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46210+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46211 ret = -EAGAIN;
46212
46213 pipe_unlock(ipipe);
46214diff -urNp linux-2.6.32.46/fs/sysfs/file.c linux-2.6.32.46/fs/sysfs/file.c
46215--- linux-2.6.32.46/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46216+++ linux-2.6.32.46/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46217@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46218
46219 struct sysfs_open_dirent {
46220 atomic_t refcnt;
46221- atomic_t event;
46222+ atomic_unchecked_t event;
46223 wait_queue_head_t poll;
46224 struct list_head buffers; /* goes through sysfs_buffer.list */
46225 };
46226@@ -53,7 +53,7 @@ struct sysfs_buffer {
46227 size_t count;
46228 loff_t pos;
46229 char * page;
46230- struct sysfs_ops * ops;
46231+ const struct sysfs_ops * ops;
46232 struct mutex mutex;
46233 int needs_read_fill;
46234 int event;
46235@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46236 {
46237 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46238 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46239- struct sysfs_ops * ops = buffer->ops;
46240+ const struct sysfs_ops * ops = buffer->ops;
46241 int ret = 0;
46242 ssize_t count;
46243
46244@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46245 if (!sysfs_get_active_two(attr_sd))
46246 return -ENODEV;
46247
46248- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46249+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46250 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46251
46252 sysfs_put_active_two(attr_sd);
46253@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46254 {
46255 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46256 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46257- struct sysfs_ops * ops = buffer->ops;
46258+ const struct sysfs_ops * ops = buffer->ops;
46259 int rc;
46260
46261 /* need attr_sd for attr and ops, its parent for kobj */
46262@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46263 return -ENOMEM;
46264
46265 atomic_set(&new_od->refcnt, 0);
46266- atomic_set(&new_od->event, 1);
46267+ atomic_set_unchecked(&new_od->event, 1);
46268 init_waitqueue_head(&new_od->poll);
46269 INIT_LIST_HEAD(&new_od->buffers);
46270 goto retry;
46271@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46272 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46273 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46274 struct sysfs_buffer *buffer;
46275- struct sysfs_ops *ops;
46276+ const struct sysfs_ops *ops;
46277 int error = -EACCES;
46278 char *p;
46279
46280@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46281
46282 sysfs_put_active_two(attr_sd);
46283
46284- if (buffer->event != atomic_read(&od->event))
46285+ if (buffer->event != atomic_read_unchecked(&od->event))
46286 goto trigger;
46287
46288 return DEFAULT_POLLMASK;
46289@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46290
46291 od = sd->s_attr.open;
46292 if (od) {
46293- atomic_inc(&od->event);
46294+ atomic_inc_unchecked(&od->event);
46295 wake_up_interruptible(&od->poll);
46296 }
46297
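
Editor's note, not part of the patch: the sysfs hunks above switch sysfs_open_dirent.event to PaX's atomic_unchecked_t and its *_unchecked() accessors. Under PaX's REFCOUNT hardening, ordinary atomic_t increments are overflow-checked; the unchecked variant is meant for counters like this poll-event sequence number, where wrapping around is harmless and must not trip the overflow handler. A hedged userspace sketch of that distinction (the saturation policy and names below are illustrative, not PaX's implementation):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* refcount-style increment: overflow is a bug, so detect it and refuse to wrap */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != INT_MAX) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 0;   /* incremented */
	}
	return -1;                  /* would overflow: report instead of wrapping */
}

/* event-counter-style increment: wrapping is harmless, just keep counting */
static void unchecked_inc(atomic_uint *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	atomic_int  ref;
	atomic_uint event;

	atomic_init(&ref, INT_MAX);
	atomic_init(&event, UINT_MAX);

	printf("checked_inc at INT_MAX -> %d (refused)\n", checked_inc(&ref));
	unchecked_inc(&event);      /* wraps to 0, fine for an event counter */
	printf("event counter after wrap: %u\n", atomic_load(&event));
	return 0;
}
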
46298diff -urNp linux-2.6.32.46/fs/sysfs/mount.c linux-2.6.32.46/fs/sysfs/mount.c
46299--- linux-2.6.32.46/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46300+++ linux-2.6.32.46/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46301@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46302 .s_name = "",
46303 .s_count = ATOMIC_INIT(1),
46304 .s_flags = SYSFS_DIR,
46305+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46306+ .s_mode = S_IFDIR | S_IRWXU,
46307+#else
46308 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46309+#endif
46310 .s_ino = 1,
46311 };
46312
46313diff -urNp linux-2.6.32.46/fs/sysfs/symlink.c linux-2.6.32.46/fs/sysfs/symlink.c
46314--- linux-2.6.32.46/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46315+++ linux-2.6.32.46/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46316@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46317
46318 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46319 {
46320- char *page = nd_get_link(nd);
46321+ const char *page = nd_get_link(nd);
46322 if (!IS_ERR(page))
46323 free_page((unsigned long)page);
46324 }
46325diff -urNp linux-2.6.32.46/fs/udf/balloc.c linux-2.6.32.46/fs/udf/balloc.c
46326--- linux-2.6.32.46/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46327+++ linux-2.6.32.46/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46328@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46329
46330 mutex_lock(&sbi->s_alloc_mutex);
46331 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46332- if (bloc->logicalBlockNum < 0 ||
46333- (bloc->logicalBlockNum + count) >
46334- partmap->s_partition_len) {
46335+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46336 udf_debug("%d < %d || %d + %d > %d\n",
46337 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46338 count, partmap->s_partition_len);
46339@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46340
46341 mutex_lock(&sbi->s_alloc_mutex);
46342 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46343- if (bloc->logicalBlockNum < 0 ||
46344- (bloc->logicalBlockNum + count) >
46345- partmap->s_partition_len) {
46346+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46347 udf_debug("%d < %d || %d + %d > %d\n",
46348 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46349 partmap->s_partition_len);
46350diff -urNp linux-2.6.32.46/fs/udf/inode.c linux-2.6.32.46/fs/udf/inode.c
46351--- linux-2.6.32.46/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46352+++ linux-2.6.32.46/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46353@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46354 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46355 int lastblock = 0;
46356
46357+ pax_track_stack();
46358+
46359 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46360 prev_epos.block = iinfo->i_location;
46361 prev_epos.bh = NULL;
46362diff -urNp linux-2.6.32.46/fs/udf/misc.c linux-2.6.32.46/fs/udf/misc.c
46363--- linux-2.6.32.46/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46364+++ linux-2.6.32.46/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46365@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46366
46367 u8 udf_tag_checksum(const struct tag *t)
46368 {
46369- u8 *data = (u8 *)t;
46370+ const u8 *data = (const u8 *)t;
46371 u8 checksum = 0;
46372 int i;
46373 for (i = 0; i < sizeof(struct tag); ++i)
46374diff -urNp linux-2.6.32.46/fs/utimes.c linux-2.6.32.46/fs/utimes.c
46375--- linux-2.6.32.46/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46376+++ linux-2.6.32.46/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46377@@ -1,6 +1,7 @@
46378 #include <linux/compiler.h>
46379 #include <linux/file.h>
46380 #include <linux/fs.h>
46381+#include <linux/security.h>
46382 #include <linux/linkage.h>
46383 #include <linux/mount.h>
46384 #include <linux/namei.h>
46385@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46386 goto mnt_drop_write_and_out;
46387 }
46388 }
46389+
46390+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46391+ error = -EACCES;
46392+ goto mnt_drop_write_and_out;
46393+ }
46394+
46395 mutex_lock(&inode->i_mutex);
46396 error = notify_change(path->dentry, &newattrs);
46397 mutex_unlock(&inode->i_mutex);
46398diff -urNp linux-2.6.32.46/fs/xattr_acl.c linux-2.6.32.46/fs/xattr_acl.c
46399--- linux-2.6.32.46/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46400+++ linux-2.6.32.46/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46401@@ -17,8 +17,8 @@
46402 struct posix_acl *
46403 posix_acl_from_xattr(const void *value, size_t size)
46404 {
46405- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46406- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46407+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46408+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46409 int count;
46410 struct posix_acl *acl;
46411 struct posix_acl_entry *acl_e;
46412diff -urNp linux-2.6.32.46/fs/xattr.c linux-2.6.32.46/fs/xattr.c
46413--- linux-2.6.32.46/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46414+++ linux-2.6.32.46/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46415@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46416 * Extended attribute SET operations
46417 */
46418 static long
46419-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46420+setxattr(struct path *path, const char __user *name, const void __user *value,
46421 size_t size, int flags)
46422 {
46423 int error;
46424@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46425 return PTR_ERR(kvalue);
46426 }
46427
46428- error = vfs_setxattr(d, kname, kvalue, size, flags);
46429+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46430+ error = -EACCES;
46431+ goto out;
46432+ }
46433+
46434+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46435+out:
46436 kfree(kvalue);
46437 return error;
46438 }
46439@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46440 return error;
46441 error = mnt_want_write(path.mnt);
46442 if (!error) {
46443- error = setxattr(path.dentry, name, value, size, flags);
46444+ error = setxattr(&path, name, value, size, flags);
46445 mnt_drop_write(path.mnt);
46446 }
46447 path_put(&path);
46448@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46449 return error;
46450 error = mnt_want_write(path.mnt);
46451 if (!error) {
46452- error = setxattr(path.dentry, name, value, size, flags);
46453+ error = setxattr(&path, name, value, size, flags);
46454 mnt_drop_write(path.mnt);
46455 }
46456 path_put(&path);
46457@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46458 const void __user *,value, size_t, size, int, flags)
46459 {
46460 struct file *f;
46461- struct dentry *dentry;
46462 int error = -EBADF;
46463
46464 f = fget(fd);
46465 if (!f)
46466 return error;
46467- dentry = f->f_path.dentry;
46468- audit_inode(NULL, dentry);
46469+ audit_inode(NULL, f->f_path.dentry);
46470 error = mnt_want_write_file(f);
46471 if (!error) {
46472- error = setxattr(dentry, name, value, size, flags);
46473+ error = setxattr(&f->f_path, name, value, size, flags);
46474 mnt_drop_write(f->f_path.mnt);
46475 }
46476 fput(f);
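
Editor's note, not part of the patch: the fs/xattr.c hunks widen setxattr()'s first argument from a dentry to a struct path so the RBAC hook can inspect both the dentry and the vfsmount before vfs_setxattr() runs, and the three syscall wrappers now pass &path or &f->f_path. A minimal mock of that refactor (all types and the policy rule below are stand-ins, not kernel code):

#include <stdio.h>

/* stand-ins for the kernel types involved in the hunk above */
struct dentry   { const char *name; };
struct vfsmount { int read_only; };
struct path     { struct vfsmount *mnt; struct dentry *dentry; };

/* stand-in for gr_acl_handle_setxattr(): policy gets to see the mount too */
static int policy_allows_setxattr(const struct dentry *d, const struct vfsmount *m)
{
	(void)d;
	return !m->read_only;   /* illustrative rule only */
}

/* before: setxattr(struct dentry *d, ...) -- after: setxattr(struct path *p, ...) */
static int setxattr_like(struct path *p, const char *name)
{
	if (!policy_allows_setxattr(p->dentry, p->mnt))
		return -13;     /* -EACCES */
	printf("set %s on %s\n", name, p->dentry->name);
	return 0;
}

int main(void)
{
	struct dentry d = { "file.txt" };
	struct vfsmount ro = { 1 }, rw = { 0 };
	struct path p_ro = { &ro, &d }, p_rw = { &rw, &d };

	printf("rw mount -> %d\n", setxattr_like(&p_rw, "user.test"));
	printf("ro mount -> %d\n", setxattr_like(&p_ro, "user.test"));
	return 0;
}
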
46477diff -urNp linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl32.c
46478--- linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46479+++ linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46480@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46481 xfs_fsop_geom_t fsgeo;
46482 int error;
46483
46484+ memset(&fsgeo, 0, sizeof(fsgeo));
46485 error = xfs_fs_geometry(mp, &fsgeo, 3);
46486 if (error)
46487 return -error;
46488diff -urNp linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl.c
46489--- linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46490+++ linux-2.6.32.46/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46491@@ -134,7 +134,7 @@ xfs_find_handle(
46492 }
46493
46494 error = -EFAULT;
46495- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46496+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46497 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46498 goto out_put;
46499
46500@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46501 if (IS_ERR(dentry))
46502 return PTR_ERR(dentry);
46503
46504- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46505+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46506 if (!kbuf)
46507 goto out_dput;
46508
46509@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46510 xfs_mount_t *mp,
46511 void __user *arg)
46512 {
46513- xfs_fsop_geom_t fsgeo;
46514+ xfs_fsop_geom_t fsgeo;
46515 int error;
46516
46517 error = xfs_fs_geometry(mp, &fsgeo, 3);
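
Editor's note, not part of the patch: the two xfs ioctl files above get classic kernel-infoleak hardening -- the v1 geometry struct is zeroed with memset() before being only partially filled and copied to userspace, the attrlist buffer moves from kmalloc() to kzalloc(), and hsize is bounded by sizeof(handle) before copy_to_user(). A small userspace sketch of why the zeroing matters (mock types; printing the padding stands in for copy_to_user() exposing it):

#include <stdio.h>
#include <string.h>

struct geom_v1 {
	unsigned int blocksize;
	unsigned int inodesize;
	char         pad[8];     /* never written by fill_geom() */
};

/* fills only the fields this version knows about, like xfs_fs_geometry(..., 3) */
static void fill_geom(struct geom_v1 *g)
{
	g->blocksize = 4096;
	g->inodesize = 256;
}

static void show(const char *tag, const struct geom_v1 *g)
{
	/* whatever is left in *g would reach userspace on the copy-out */
	printf("%s: pad[0..3] = %02x %02x %02x %02x\n", tag,
	       (unsigned char)g->pad[0], (unsigned char)g->pad[1],
	       (unsigned char)g->pad[2], (unsigned char)g->pad[3]);
}

int main(void)
{
	struct geom_v1 g;

	memset(&g, 0xAA, sizeof(g));   /* simulate stale stack contents */
	fill_geom(&g);
	show("without memset", &g);    /* stale 0xAA bytes would leak   */

	memset(&g, 0, sizeof(g));      /* the fix added in the hunk above */
	fill_geom(&g);
	show("with memset   ", &g);    /* padding is now all zeroes       */
	return 0;
}
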
46518diff -urNp linux-2.6.32.46/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.46/fs/xfs/linux-2.6/xfs_iops.c
46519--- linux-2.6.32.46/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46520+++ linux-2.6.32.46/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46521@@ -468,7 +468,7 @@ xfs_vn_put_link(
46522 struct nameidata *nd,
46523 void *p)
46524 {
46525- char *s = nd_get_link(nd);
46526+ const char *s = nd_get_link(nd);
46527
46528 if (!IS_ERR(s))
46529 kfree(s);
46530diff -urNp linux-2.6.32.46/fs/xfs/xfs_bmap.c linux-2.6.32.46/fs/xfs/xfs_bmap.c
46531--- linux-2.6.32.46/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46532+++ linux-2.6.32.46/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46533@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46534 int nmap,
46535 int ret_nmap);
46536 #else
46537-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46538+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46539 #endif /* DEBUG */
46540
46541 #if defined(XFS_RW_TRACE)
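
Editor's note, not part of the patch: the xfs_bmap.c hunk redefines the disabled xfs_bmap_validate_ret() stub from an empty macro to do {} while (0), the usual idiom for keeping a compiled-out call a single well-formed statement -- the call site's trailing semicolon no longer leaves a bare empty body inside if/else, something -Wextra's -Wempty-body typically flags. A tiny illustration:

#include <stdio.h>

/* two ways to stub out a debug-only call */
#define validate_empty(a, b)                       /* expands to nothing     */
#define validate_safe(a, b)   do {} while (0)      /* expands to a statement */

int main(void)
{
	int debug = 0;

	if (debug)
		validate_empty(1, 2);   /* becomes "if (debug) ;" -- an empty body,
					 * typically flagged by -Wextra (-Wempty-body) */
	else
		puts("empty-macro form: the body silently vanished");

	if (debug)
		validate_safe(1, 2);    /* stays a real (no-op) statement */
	else
		puts("do {} while (0) form: still one well-formed statement");

	return 0;
}
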
46542diff -urNp linux-2.6.32.46/fs/xfs/xfs_dir2_sf.c linux-2.6.32.46/fs/xfs/xfs_dir2_sf.c
46543--- linux-2.6.32.46/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46544+++ linux-2.6.32.46/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46545@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46546 }
46547
46548 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46549- if (filldir(dirent, sfep->name, sfep->namelen,
46550+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46551+ char name[sfep->namelen];
46552+ memcpy(name, sfep->name, sfep->namelen);
46553+ if (filldir(dirent, name, sfep->namelen,
46554+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
46555+ *offset = off & 0x7fffffff;
46556+ return 0;
46557+ }
46558+ } else if (filldir(dirent, sfep->name, sfep->namelen,
46559 off & 0x7fffffff, ino, DT_UNKNOWN)) {
46560 *offset = off & 0x7fffffff;
46561 return 0;
46562diff -urNp linux-2.6.32.46/grsecurity/gracl_alloc.c linux-2.6.32.46/grsecurity/gracl_alloc.c
46563--- linux-2.6.32.46/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
46564+++ linux-2.6.32.46/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
46565@@ -0,0 +1,105 @@
46566+#include <linux/kernel.h>
46567+#include <linux/mm.h>
46568+#include <linux/slab.h>
46569+#include <linux/vmalloc.h>
46570+#include <linux/gracl.h>
46571+#include <linux/grsecurity.h>
46572+
46573+static unsigned long alloc_stack_next = 1;
46574+static unsigned long alloc_stack_size = 1;
46575+static void **alloc_stack;
46576+
46577+static __inline__ int
46578+alloc_pop(void)
46579+{
46580+ if (alloc_stack_next == 1)
46581+ return 0;
46582+
46583+ kfree(alloc_stack[alloc_stack_next - 2]);
46584+
46585+ alloc_stack_next--;
46586+
46587+ return 1;
46588+}
46589+
46590+static __inline__ int
46591+alloc_push(void *buf)
46592+{
46593+ if (alloc_stack_next >= alloc_stack_size)
46594+ return 1;
46595+
46596+ alloc_stack[alloc_stack_next - 1] = buf;
46597+
46598+ alloc_stack_next++;
46599+
46600+ return 0;
46601+}
46602+
46603+void *
46604+acl_alloc(unsigned long len)
46605+{
46606+ void *ret = NULL;
46607+
46608+ if (!len || len > PAGE_SIZE)
46609+ goto out;
46610+
46611+ ret = kmalloc(len, GFP_KERNEL);
46612+
46613+ if (ret) {
46614+ if (alloc_push(ret)) {
46615+ kfree(ret);
46616+ ret = NULL;
46617+ }
46618+ }
46619+
46620+out:
46621+ return ret;
46622+}
46623+
46624+void *
46625+acl_alloc_num(unsigned long num, unsigned long len)
46626+{
46627+ if (!len || (num > (PAGE_SIZE / len)))
46628+ return NULL;
46629+
46630+ return acl_alloc(num * len);
46631+}
46632+
46633+void
46634+acl_free_all(void)
46635+{
46636+ if (gr_acl_is_enabled() || !alloc_stack)
46637+ return;
46638+
46639+ while (alloc_pop()) ;
46640+
46641+ if (alloc_stack) {
46642+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
46643+ kfree(alloc_stack);
46644+ else
46645+ vfree(alloc_stack);
46646+ }
46647+
46648+ alloc_stack = NULL;
46649+ alloc_stack_size = 1;
46650+ alloc_stack_next = 1;
46651+
46652+ return;
46653+}
46654+
46655+int
46656+acl_alloc_stack_init(unsigned long size)
46657+{
46658+ if ((size * sizeof (void *)) <= PAGE_SIZE)
46659+ alloc_stack =
46660+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
46661+ else
46662+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
46663+
46664+ alloc_stack_size = size;
46665+
46666+ if (!alloc_stack)
46667+ return 0;
46668+ else
46669+ return 1;
46670+}
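
Editor's note, not part of the patch: gracl_alloc.c above is a tiny one-shot allocator for RBAC policy loading -- acl_alloc_stack_init() sizes a pointer stack (init_variables() further down calls it with role_db.num_pointers + 5), each acl_alloc() is capped at PAGE_SIZE and pushes its buffer onto that stack, and acl_free_all() pops and frees everything when the policy is torn down. A condensed userspace analogue of the same push/then-free-all idea (malloc/free replace kmalloc/vmalloc; error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void **alloc_stack;
static size_t alloc_next, alloc_size;

static int acl_alloc_stack_init(size_t n)
{
	alloc_stack = calloc(n, sizeof(void *));
	alloc_size = n;
	alloc_next = 0;
	return alloc_stack != NULL;
}

static void *acl_alloc(size_t len)
{
	void *p;

	if (!len || alloc_next >= alloc_size)
		return NULL;
	p = malloc(len);
	if (p)
		alloc_stack[alloc_next++] = p;   /* remember it for the bulk free */
	return p;
}

static void acl_free_all(void)
{
	while (alloc_next)
		free(alloc_stack[--alloc_next]);
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_size = 0;
}

int main(void)
{
	acl_alloc_stack_init(8);
	char *name = acl_alloc(16);
	if (name)
		strcpy(name, "policy object");
	printf("%s\n", name ? name : "(alloc failed)");
	acl_free_all();                          /* frees every acl_alloc()'d buffer */
	return 0;
}
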
46671diff -urNp linux-2.6.32.46/grsecurity/gracl.c linux-2.6.32.46/grsecurity/gracl.c
46672--- linux-2.6.32.46/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
46673+++ linux-2.6.32.46/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
46674@@ -0,0 +1,4082 @@
46675+#include <linux/kernel.h>
46676+#include <linux/module.h>
46677+#include <linux/sched.h>
46678+#include <linux/mm.h>
46679+#include <linux/file.h>
46680+#include <linux/fs.h>
46681+#include <linux/namei.h>
46682+#include <linux/mount.h>
46683+#include <linux/tty.h>
46684+#include <linux/proc_fs.h>
46685+#include <linux/smp_lock.h>
46686+#include <linux/slab.h>
46687+#include <linux/vmalloc.h>
46688+#include <linux/types.h>
46689+#include <linux/sysctl.h>
46690+#include <linux/netdevice.h>
46691+#include <linux/ptrace.h>
46692+#include <linux/gracl.h>
46693+#include <linux/gralloc.h>
46694+#include <linux/grsecurity.h>
46695+#include <linux/grinternal.h>
46696+#include <linux/pid_namespace.h>
46697+#include <linux/fdtable.h>
46698+#include <linux/percpu.h>
46699+
46700+#include <asm/uaccess.h>
46701+#include <asm/errno.h>
46702+#include <asm/mman.h>
46703+
46704+static struct acl_role_db acl_role_set;
46705+static struct name_db name_set;
46706+static struct inodev_db inodev_set;
46707+
46708+/* for keeping track of userspace pointers used for subjects, so we
46709+ can share references in the kernel as well
46710+*/
46711+
46712+static struct dentry *real_root;
46713+static struct vfsmount *real_root_mnt;
46714+
46715+static struct acl_subj_map_db subj_map_set;
46716+
46717+static struct acl_role_label *default_role;
46718+
46719+static struct acl_role_label *role_list;
46720+
46721+static u16 acl_sp_role_value;
46722+
46723+extern char *gr_shared_page[4];
46724+static DEFINE_MUTEX(gr_dev_mutex);
46725+DEFINE_RWLOCK(gr_inode_lock);
46726+
46727+struct gr_arg *gr_usermode;
46728+
46729+static unsigned int gr_status __read_only = GR_STATUS_INIT;
46730+
46731+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
46732+extern void gr_clear_learn_entries(void);
46733+
46734+#ifdef CONFIG_GRKERNSEC_RESLOG
46735+extern void gr_log_resource(const struct task_struct *task,
46736+ const int res, const unsigned long wanted, const int gt);
46737+#endif
46738+
46739+unsigned char *gr_system_salt;
46740+unsigned char *gr_system_sum;
46741+
46742+static struct sprole_pw **acl_special_roles = NULL;
46743+static __u16 num_sprole_pws = 0;
46744+
46745+static struct acl_role_label *kernel_role = NULL;
46746+
46747+static unsigned int gr_auth_attempts = 0;
46748+static unsigned long gr_auth_expires = 0UL;
46749+
46750+#ifdef CONFIG_NET
46751+extern struct vfsmount *sock_mnt;
46752+#endif
46753+extern struct vfsmount *pipe_mnt;
46754+extern struct vfsmount *shm_mnt;
46755+#ifdef CONFIG_HUGETLBFS
46756+extern struct vfsmount *hugetlbfs_vfsmount;
46757+#endif
46758+
46759+static struct acl_object_label *fakefs_obj_rw;
46760+static struct acl_object_label *fakefs_obj_rwx;
46761+
46762+extern int gr_init_uidset(void);
46763+extern void gr_free_uidset(void);
46764+extern void gr_remove_uid(uid_t uid);
46765+extern int gr_find_uid(uid_t uid);
46766+
46767+__inline__ int
46768+gr_acl_is_enabled(void)
46769+{
46770+ return (gr_status & GR_READY);
46771+}
46772+
46773+#ifdef CONFIG_BTRFS_FS
46774+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46775+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46776+#endif
46777+
46778+static inline dev_t __get_dev(const struct dentry *dentry)
46779+{
46780+#ifdef CONFIG_BTRFS_FS
46781+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46782+ return get_btrfs_dev_from_inode(dentry->d_inode);
46783+ else
46784+#endif
46785+ return dentry->d_inode->i_sb->s_dev;
46786+}
46787+
46788+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
46789+{
46790+ return __get_dev(dentry);
46791+}
46792+
46793+static char gr_task_roletype_to_char(struct task_struct *task)
46794+{
46795+ switch (task->role->roletype &
46796+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
46797+ GR_ROLE_SPECIAL)) {
46798+ case GR_ROLE_DEFAULT:
46799+ return 'D';
46800+ case GR_ROLE_USER:
46801+ return 'U';
46802+ case GR_ROLE_GROUP:
46803+ return 'G';
46804+ case GR_ROLE_SPECIAL:
46805+ return 'S';
46806+ }
46807+
46808+ return 'X';
46809+}
46810+
46811+char gr_roletype_to_char(void)
46812+{
46813+ return gr_task_roletype_to_char(current);
46814+}
46815+
46816+__inline__ int
46817+gr_acl_tpe_check(void)
46818+{
46819+ if (unlikely(!(gr_status & GR_READY)))
46820+ return 0;
46821+ if (current->role->roletype & GR_ROLE_TPE)
46822+ return 1;
46823+ else
46824+ return 0;
46825+}
46826+
46827+int
46828+gr_handle_rawio(const struct inode *inode)
46829+{
46830+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
46831+ if (inode && S_ISBLK(inode->i_mode) &&
46832+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
46833+ !capable(CAP_SYS_RAWIO))
46834+ return 1;
46835+#endif
46836+ return 0;
46837+}
46838+
46839+static int
46840+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
46841+{
46842+ if (likely(lena != lenb))
46843+ return 0;
46844+
46845+ return !memcmp(a, b, lena);
46846+}
46847+
46848+/* this must be called with vfsmount_lock and dcache_lock held */
46849+
46850+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46851+ struct dentry *root, struct vfsmount *rootmnt,
46852+ char *buffer, int buflen)
46853+{
46854+ char * end = buffer+buflen;
46855+ char * retval;
46856+ int namelen;
46857+
46858+ *--end = '\0';
46859+ buflen--;
46860+
46861+ if (buflen < 1)
46862+ goto Elong;
46863+ /* Get '/' right */
46864+ retval = end-1;
46865+ *retval = '/';
46866+
46867+ for (;;) {
46868+ struct dentry * parent;
46869+
46870+ if (dentry == root && vfsmnt == rootmnt)
46871+ break;
46872+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
46873+ /* Global root? */
46874+ if (vfsmnt->mnt_parent == vfsmnt)
46875+ goto global_root;
46876+ dentry = vfsmnt->mnt_mountpoint;
46877+ vfsmnt = vfsmnt->mnt_parent;
46878+ continue;
46879+ }
46880+ parent = dentry->d_parent;
46881+ prefetch(parent);
46882+ namelen = dentry->d_name.len;
46883+ buflen -= namelen + 1;
46884+ if (buflen < 0)
46885+ goto Elong;
46886+ end -= namelen;
46887+ memcpy(end, dentry->d_name.name, namelen);
46888+ *--end = '/';
46889+ retval = end;
46890+ dentry = parent;
46891+ }
46892+
46893+out:
46894+ return retval;
46895+
46896+global_root:
46897+ namelen = dentry->d_name.len;
46898+ buflen -= namelen;
46899+ if (buflen < 0)
46900+ goto Elong;
46901+ retval -= namelen-1; /* hit the slash */
46902+ memcpy(retval, dentry->d_name.name, namelen);
46903+ goto out;
46904+Elong:
46905+ retval = ERR_PTR(-ENAMETOOLONG);
46906+ goto out;
46907+}
46908+
46909+static char *
46910+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
46911+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
46912+{
46913+ char *retval;
46914+
46915+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
46916+ if (unlikely(IS_ERR(retval)))
46917+ retval = strcpy(buf, "<path too long>");
46918+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
46919+ retval[1] = '\0';
46920+
46921+ return retval;
46922+}
46923+
46924+static char *
46925+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46926+ char *buf, int buflen)
46927+{
46928+ char *res;
46929+
46930+ /* we can use real_root, real_root_mnt, because this is only called
46931+ by the RBAC system */
46932+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
46933+
46934+ return res;
46935+}
46936+
46937+static char *
46938+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
46939+ char *buf, int buflen)
46940+{
46941+ char *res;
46942+ struct dentry *root;
46943+ struct vfsmount *rootmnt;
46944+ struct task_struct *reaper = &init_task;
46945+
46946+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
46947+ read_lock(&reaper->fs->lock);
46948+ root = dget(reaper->fs->root.dentry);
46949+ rootmnt = mntget(reaper->fs->root.mnt);
46950+ read_unlock(&reaper->fs->lock);
46951+
46952+ spin_lock(&dcache_lock);
46953+ spin_lock(&vfsmount_lock);
46954+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
46955+ spin_unlock(&vfsmount_lock);
46956+ spin_unlock(&dcache_lock);
46957+
46958+ dput(root);
46959+ mntput(rootmnt);
46960+ return res;
46961+}
46962+
46963+static char *
46964+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
46965+{
46966+ char *ret;
46967+ spin_lock(&dcache_lock);
46968+ spin_lock(&vfsmount_lock);
46969+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46970+ PAGE_SIZE);
46971+ spin_unlock(&vfsmount_lock);
46972+ spin_unlock(&dcache_lock);
46973+ return ret;
46974+}
46975+
46976+char *
46977+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
46978+{
46979+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
46980+ PAGE_SIZE);
46981+}
46982+
46983+char *
46984+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
46985+{
46986+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
46987+ PAGE_SIZE);
46988+}
46989+
46990+char *
46991+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
46992+{
46993+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
46994+ PAGE_SIZE);
46995+}
46996+
46997+char *
46998+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
46999+{
47000+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47001+ PAGE_SIZE);
47002+}
47003+
47004+char *
47005+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47006+{
47007+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47008+ PAGE_SIZE);
47009+}
47010+
47011+__inline__ __u32
47012+to_gr_audit(const __u32 reqmode)
47013+{
47014+ /* masks off auditable permission flags, then shifts them to create
47015+ auditing flags, and adds the special case of append auditing if
47016+ we're requesting write */
47017+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47018+}
47019+
47020+struct acl_subject_label *
47021+lookup_subject_map(const struct acl_subject_label *userp)
47022+{
47023+ unsigned int index = shash(userp, subj_map_set.s_size);
47024+ struct subject_map *match;
47025+
47026+ match = subj_map_set.s_hash[index];
47027+
47028+ while (match && match->user != userp)
47029+ match = match->next;
47030+
47031+ if (match != NULL)
47032+ return match->kernel;
47033+ else
47034+ return NULL;
47035+}
47036+
47037+static void
47038+insert_subj_map_entry(struct subject_map *subjmap)
47039+{
47040+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47041+ struct subject_map **curr;
47042+
47043+ subjmap->prev = NULL;
47044+
47045+ curr = &subj_map_set.s_hash[index];
47046+ if (*curr != NULL)
47047+ (*curr)->prev = subjmap;
47048+
47049+ subjmap->next = *curr;
47050+ *curr = subjmap;
47051+
47052+ return;
47053+}
47054+
47055+static struct acl_role_label *
47056+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47057+ const gid_t gid)
47058+{
47059+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47060+ struct acl_role_label *match;
47061+ struct role_allowed_ip *ipp;
47062+ unsigned int x;
47063+ u32 curr_ip = task->signal->curr_ip;
47064+
47065+ task->signal->saved_ip = curr_ip;
47066+
47067+ match = acl_role_set.r_hash[index];
47068+
47069+ while (match) {
47070+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47071+ for (x = 0; x < match->domain_child_num; x++) {
47072+ if (match->domain_children[x] == uid)
47073+ goto found;
47074+ }
47075+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47076+ break;
47077+ match = match->next;
47078+ }
47079+found:
47080+ if (match == NULL) {
47081+ try_group:
47082+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47083+ match = acl_role_set.r_hash[index];
47084+
47085+ while (match) {
47086+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47087+ for (x = 0; x < match->domain_child_num; x++) {
47088+ if (match->domain_children[x] == gid)
47089+ goto found2;
47090+ }
47091+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47092+ break;
47093+ match = match->next;
47094+ }
47095+found2:
47096+ if (match == NULL)
47097+ match = default_role;
47098+ if (match->allowed_ips == NULL)
47099+ return match;
47100+ else {
47101+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47102+ if (likely
47103+ ((ntohl(curr_ip) & ipp->netmask) ==
47104+ (ntohl(ipp->addr) & ipp->netmask)))
47105+ return match;
47106+ }
47107+ match = default_role;
47108+ }
47109+ } else if (match->allowed_ips == NULL) {
47110+ return match;
47111+ } else {
47112+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47113+ if (likely
47114+ ((ntohl(curr_ip) & ipp->netmask) ==
47115+ (ntohl(ipp->addr) & ipp->netmask)))
47116+ return match;
47117+ }
47118+ goto try_group;
47119+ }
47120+
47121+ return match;
47122+}
47123+
47124+struct acl_subject_label *
47125+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47126+ const struct acl_role_label *role)
47127+{
47128+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47129+ struct acl_subject_label *match;
47130+
47131+ match = role->subj_hash[index];
47132+
47133+ while (match && (match->inode != ino || match->device != dev ||
47134+ (match->mode & GR_DELETED))) {
47135+ match = match->next;
47136+ }
47137+
47138+ if (match && !(match->mode & GR_DELETED))
47139+ return match;
47140+ else
47141+ return NULL;
47142+}
47143+
47144+struct acl_subject_label *
47145+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47146+ const struct acl_role_label *role)
47147+{
47148+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47149+ struct acl_subject_label *match;
47150+
47151+ match = role->subj_hash[index];
47152+
47153+ while (match && (match->inode != ino || match->device != dev ||
47154+ !(match->mode & GR_DELETED))) {
47155+ match = match->next;
47156+ }
47157+
47158+ if (match && (match->mode & GR_DELETED))
47159+ return match;
47160+ else
47161+ return NULL;
47162+}
47163+
47164+static struct acl_object_label *
47165+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47166+ const struct acl_subject_label *subj)
47167+{
47168+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47169+ struct acl_object_label *match;
47170+
47171+ match = subj->obj_hash[index];
47172+
47173+ while (match && (match->inode != ino || match->device != dev ||
47174+ (match->mode & GR_DELETED))) {
47175+ match = match->next;
47176+ }
47177+
47178+ if (match && !(match->mode & GR_DELETED))
47179+ return match;
47180+ else
47181+ return NULL;
47182+}
47183+
47184+static struct acl_object_label *
47185+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47186+ const struct acl_subject_label *subj)
47187+{
47188+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47189+ struct acl_object_label *match;
47190+
47191+ match = subj->obj_hash[index];
47192+
47193+ while (match && (match->inode != ino || match->device != dev ||
47194+ !(match->mode & GR_DELETED))) {
47195+ match = match->next;
47196+ }
47197+
47198+ if (match && (match->mode & GR_DELETED))
47199+ return match;
47200+
47201+ match = subj->obj_hash[index];
47202+
47203+ while (match && (match->inode != ino || match->device != dev ||
47204+ (match->mode & GR_DELETED))) {
47205+ match = match->next;
47206+ }
47207+
47208+ if (match && !(match->mode & GR_DELETED))
47209+ return match;
47210+ else
47211+ return NULL;
47212+}
47213+
47214+static struct name_entry *
47215+lookup_name_entry(const char *name)
47216+{
47217+ unsigned int len = strlen(name);
47218+ unsigned int key = full_name_hash(name, len);
47219+ unsigned int index = key % name_set.n_size;
47220+ struct name_entry *match;
47221+
47222+ match = name_set.n_hash[index];
47223+
47224+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47225+ match = match->next;
47226+
47227+ return match;
47228+}
47229+
47230+static struct name_entry *
47231+lookup_name_entry_create(const char *name)
47232+{
47233+ unsigned int len = strlen(name);
47234+ unsigned int key = full_name_hash(name, len);
47235+ unsigned int index = key % name_set.n_size;
47236+ struct name_entry *match;
47237+
47238+ match = name_set.n_hash[index];
47239+
47240+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47241+ !match->deleted))
47242+ match = match->next;
47243+
47244+ if (match && match->deleted)
47245+ return match;
47246+
47247+ match = name_set.n_hash[index];
47248+
47249+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47250+ match->deleted))
47251+ match = match->next;
47252+
47253+ if (match && !match->deleted)
47254+ return match;
47255+ else
47256+ return NULL;
47257+}
47258+
47259+static struct inodev_entry *
47260+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47261+{
47262+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47263+ struct inodev_entry *match;
47264+
47265+ match = inodev_set.i_hash[index];
47266+
47267+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47268+ match = match->next;
47269+
47270+ return match;
47271+}
47272+
47273+static void
47274+insert_inodev_entry(struct inodev_entry *entry)
47275+{
47276+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47277+ inodev_set.i_size);
47278+ struct inodev_entry **curr;
47279+
47280+ entry->prev = NULL;
47281+
47282+ curr = &inodev_set.i_hash[index];
47283+ if (*curr != NULL)
47284+ (*curr)->prev = entry;
47285+
47286+ entry->next = *curr;
47287+ *curr = entry;
47288+
47289+ return;
47290+}
47291+
47292+static void
47293+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47294+{
47295+ unsigned int index =
47296+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47297+ struct acl_role_label **curr;
47298+ struct acl_role_label *tmp;
47299+
47300+ curr = &acl_role_set.r_hash[index];
47301+
47302+ /* if role was already inserted due to domains and already has
47303+ a role in the same bucket as it attached, then we need to
47304+ combine these two buckets
47305+ */
47306+ if (role->next) {
47307+ tmp = role->next;
47308+ while (tmp->next)
47309+ tmp = tmp->next;
47310+ tmp->next = *curr;
47311+ } else
47312+ role->next = *curr;
47313+ *curr = role;
47314+
47315+ return;
47316+}
47317+
47318+static void
47319+insert_acl_role_label(struct acl_role_label *role)
47320+{
47321+ int i;
47322+
47323+ if (role_list == NULL) {
47324+ role_list = role;
47325+ role->prev = NULL;
47326+ } else {
47327+ role->prev = role_list;
47328+ role_list = role;
47329+ }
47330+
47331+ /* used for hash chains */
47332+ role->next = NULL;
47333+
47334+ if (role->roletype & GR_ROLE_DOMAIN) {
47335+ for (i = 0; i < role->domain_child_num; i++)
47336+ __insert_acl_role_label(role, role->domain_children[i]);
47337+ } else
47338+ __insert_acl_role_label(role, role->uidgid);
47339+}
47340+
47341+static int
47342+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47343+{
47344+ struct name_entry **curr, *nentry;
47345+ struct inodev_entry *ientry;
47346+ unsigned int len = strlen(name);
47347+ unsigned int key = full_name_hash(name, len);
47348+ unsigned int index = key % name_set.n_size;
47349+
47350+ curr = &name_set.n_hash[index];
47351+
47352+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47353+ curr = &((*curr)->next);
47354+
47355+ if (*curr != NULL)
47356+ return 1;
47357+
47358+ nentry = acl_alloc(sizeof (struct name_entry));
47359+ if (nentry == NULL)
47360+ return 0;
47361+ ientry = acl_alloc(sizeof (struct inodev_entry));
47362+ if (ientry == NULL)
47363+ return 0;
47364+ ientry->nentry = nentry;
47365+
47366+ nentry->key = key;
47367+ nentry->name = name;
47368+ nentry->inode = inode;
47369+ nentry->device = device;
47370+ nentry->len = len;
47371+ nentry->deleted = deleted;
47372+
47373+ nentry->prev = NULL;
47374+ curr = &name_set.n_hash[index];
47375+ if (*curr != NULL)
47376+ (*curr)->prev = nentry;
47377+ nentry->next = *curr;
47378+ *curr = nentry;
47379+
47380+ /* insert us into the table searchable by inode/dev */
47381+ insert_inodev_entry(ientry);
47382+
47383+ return 1;
47384+}
47385+
47386+static void
47387+insert_acl_obj_label(struct acl_object_label *obj,
47388+ struct acl_subject_label *subj)
47389+{
47390+ unsigned int index =
47391+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47392+ struct acl_object_label **curr;
47393+
47394+
47395+ obj->prev = NULL;
47396+
47397+ curr = &subj->obj_hash[index];
47398+ if (*curr != NULL)
47399+ (*curr)->prev = obj;
47400+
47401+ obj->next = *curr;
47402+ *curr = obj;
47403+
47404+ return;
47405+}
47406+
47407+static void
47408+insert_acl_subj_label(struct acl_subject_label *obj,
47409+ struct acl_role_label *role)
47410+{
47411+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47412+ struct acl_subject_label **curr;
47413+
47414+ obj->prev = NULL;
47415+
47416+ curr = &role->subj_hash[index];
47417+ if (*curr != NULL)
47418+ (*curr)->prev = obj;
47419+
47420+ obj->next = *curr;
47421+ *curr = obj;
47422+
47423+ return;
47424+}
47425+
47426+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47427+
47428+static void *
47429+create_table(__u32 * len, int elementsize)
47430+{
47431+ unsigned int table_sizes[] = {
47432+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47433+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47434+ 4194301, 8388593, 16777213, 33554393, 67108859
47435+ };
47436+ void *newtable = NULL;
47437+ unsigned int pwr = 0;
47438+
47439+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47440+ table_sizes[pwr] <= *len)
47441+ pwr++;
47442+
47443+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47444+ return newtable;
47445+
47446+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47447+ newtable =
47448+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47449+ else
47450+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47451+
47452+ *len = table_sizes[pwr];
47453+
47454+ return newtable;
47455+}
47456+
47457+static int
47458+init_variables(const struct gr_arg *arg)
47459+{
47460+ struct task_struct *reaper = &init_task;
47461+ unsigned int stacksize;
47462+
47463+ subj_map_set.s_size = arg->role_db.num_subjects;
47464+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47465+ name_set.n_size = arg->role_db.num_objects;
47466+ inodev_set.i_size = arg->role_db.num_objects;
47467+
47468+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47469+ !name_set.n_size || !inodev_set.i_size)
47470+ return 1;
47471+
47472+ if (!gr_init_uidset())
47473+ return 1;
47474+
47475+ /* set up the stack that holds allocation info */
47476+
47477+ stacksize = arg->role_db.num_pointers + 5;
47478+
47479+ if (!acl_alloc_stack_init(stacksize))
47480+ return 1;
47481+
47482+ /* grab reference for the real root dentry and vfsmount */
47483+ read_lock(&reaper->fs->lock);
47484+ real_root = dget(reaper->fs->root.dentry);
47485+ real_root_mnt = mntget(reaper->fs->root.mnt);
47486+ read_unlock(&reaper->fs->lock);
47487+
47488+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47489+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47490+#endif
47491+
47492+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47493+ if (fakefs_obj_rw == NULL)
47494+ return 1;
47495+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47496+
47497+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47498+ if (fakefs_obj_rwx == NULL)
47499+ return 1;
47500+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47501+
47502+ subj_map_set.s_hash =
47503+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47504+ acl_role_set.r_hash =
47505+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47506+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47507+ inodev_set.i_hash =
47508+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47509+
47510+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47511+ !name_set.n_hash || !inodev_set.i_hash)
47512+ return 1;
47513+
47514+ memset(subj_map_set.s_hash, 0,
47515+ sizeof(struct subject_map *) * subj_map_set.s_size);
47516+ memset(acl_role_set.r_hash, 0,
47517+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47518+ memset(name_set.n_hash, 0,
47519+ sizeof (struct name_entry *) * name_set.n_size);
47520+ memset(inodev_set.i_hash, 0,
47521+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47522+
47523+ return 0;
47524+}
47525+
47526+/* free information not needed after startup
47527+ currently contains user->kernel pointer mappings for subjects
47528+*/
47529+
47530+static void
47531+free_init_variables(void)
47532+{
47533+ __u32 i;
47534+
47535+ if (subj_map_set.s_hash) {
47536+ for (i = 0; i < subj_map_set.s_size; i++) {
47537+ if (subj_map_set.s_hash[i]) {
47538+ kfree(subj_map_set.s_hash[i]);
47539+ subj_map_set.s_hash[i] = NULL;
47540+ }
47541+ }
47542+
47543+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47544+ PAGE_SIZE)
47545+ kfree(subj_map_set.s_hash);
47546+ else
47547+ vfree(subj_map_set.s_hash);
47548+ }
47549+
47550+ return;
47551+}
47552+
47553+static void
47554+free_variables(void)
47555+{
47556+ struct acl_subject_label *s;
47557+ struct acl_role_label *r;
47558+ struct task_struct *task, *task2;
47559+ unsigned int x;
47560+
47561+ gr_clear_learn_entries();
47562+
47563+ read_lock(&tasklist_lock);
47564+ do_each_thread(task2, task) {
47565+ task->acl_sp_role = 0;
47566+ task->acl_role_id = 0;
47567+ task->acl = NULL;
47568+ task->role = NULL;
47569+ } while_each_thread(task2, task);
47570+ read_unlock(&tasklist_lock);
47571+
47572+ /* release the reference to the real root dentry and vfsmount */
47573+ if (real_root)
47574+ dput(real_root);
47575+ real_root = NULL;
47576+ if (real_root_mnt)
47577+ mntput(real_root_mnt);
47578+ real_root_mnt = NULL;
47579+
47580+ /* free all object hash tables */
47581+
47582+ FOR_EACH_ROLE_START(r)
47583+ if (r->subj_hash == NULL)
47584+ goto next_role;
47585+ FOR_EACH_SUBJECT_START(r, s, x)
47586+ if (s->obj_hash == NULL)
47587+ break;
47588+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47589+ kfree(s->obj_hash);
47590+ else
47591+ vfree(s->obj_hash);
47592+ FOR_EACH_SUBJECT_END(s, x)
47593+ FOR_EACH_NESTED_SUBJECT_START(r, s)
47594+ if (s->obj_hash == NULL)
47595+ break;
47596+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
47597+ kfree(s->obj_hash);
47598+ else
47599+ vfree(s->obj_hash);
47600+ FOR_EACH_NESTED_SUBJECT_END(s)
47601+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
47602+ kfree(r->subj_hash);
47603+ else
47604+ vfree(r->subj_hash);
47605+ r->subj_hash = NULL;
47606+next_role:
47607+ FOR_EACH_ROLE_END(r)
47608+
47609+ acl_free_all();
47610+
47611+ if (acl_role_set.r_hash) {
47612+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
47613+ PAGE_SIZE)
47614+ kfree(acl_role_set.r_hash);
47615+ else
47616+ vfree(acl_role_set.r_hash);
47617+ }
47618+ if (name_set.n_hash) {
47619+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
47620+ PAGE_SIZE)
47621+ kfree(name_set.n_hash);
47622+ else
47623+ vfree(name_set.n_hash);
47624+ }
47625+
47626+ if (inodev_set.i_hash) {
47627+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
47628+ PAGE_SIZE)
47629+ kfree(inodev_set.i_hash);
47630+ else
47631+ vfree(inodev_set.i_hash);
47632+ }
47633+
47634+ gr_free_uidset();
47635+
47636+ memset(&name_set, 0, sizeof (struct name_db));
47637+ memset(&inodev_set, 0, sizeof (struct inodev_db));
47638+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
47639+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
47640+
47641+ default_role = NULL;
47642+ role_list = NULL;
47643+
47644+ return;
47645+}
47646+
47647+static __u32
47648+count_user_objs(struct acl_object_label *userp)
47649+{
47650+ struct acl_object_label o_tmp;
47651+ __u32 num = 0;
47652+
47653+ while (userp) {
47654+ if (copy_from_user(&o_tmp, userp,
47655+ sizeof (struct acl_object_label)))
47656+ break;
47657+
47658+ userp = o_tmp.prev;
47659+ num++;
47660+ }
47661+
47662+ return num;
47663+}
47664+
47665+static struct acl_subject_label *
47666+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
47667+
47668+static int
47669+copy_user_glob(struct acl_object_label *obj)
47670+{
47671+ struct acl_object_label *g_tmp, **guser;
47672+ unsigned int len;
47673+ char *tmp;
47674+
47675+ if (obj->globbed == NULL)
47676+ return 0;
47677+
47678+ guser = &obj->globbed;
47679+ while (*guser) {
47680+ g_tmp = (struct acl_object_label *)
47681+ acl_alloc(sizeof (struct acl_object_label));
47682+ if (g_tmp == NULL)
47683+ return -ENOMEM;
47684+
47685+ if (copy_from_user(g_tmp, *guser,
47686+ sizeof (struct acl_object_label)))
47687+ return -EFAULT;
47688+
47689+ len = strnlen_user(g_tmp->filename, PATH_MAX);
47690+
47691+ if (!len || len >= PATH_MAX)
47692+ return -EINVAL;
47693+
47694+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47695+ return -ENOMEM;
47696+
47697+ if (copy_from_user(tmp, g_tmp->filename, len))
47698+ return -EFAULT;
47699+ tmp[len-1] = '\0';
47700+ g_tmp->filename = tmp;
47701+
47702+ *guser = g_tmp;
47703+ guser = &(g_tmp->next);
47704+ }
47705+
47706+ return 0;
47707+}
47708+
47709+static int
47710+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
47711+ struct acl_role_label *role)
47712+{
47713+ struct acl_object_label *o_tmp;
47714+ unsigned int len;
47715+ int ret;
47716+ char *tmp;
47717+
47718+ while (userp) {
47719+ if ((o_tmp = (struct acl_object_label *)
47720+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
47721+ return -ENOMEM;
47722+
47723+ if (copy_from_user(o_tmp, userp,
47724+ sizeof (struct acl_object_label)))
47725+ return -EFAULT;
47726+
47727+ userp = o_tmp->prev;
47728+
47729+ len = strnlen_user(o_tmp->filename, PATH_MAX);
47730+
47731+ if (!len || len >= PATH_MAX)
47732+ return -EINVAL;
47733+
47734+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47735+ return -ENOMEM;
47736+
47737+ if (copy_from_user(tmp, o_tmp->filename, len))
47738+ return -EFAULT;
47739+ tmp[len-1] = '\0';
47740+ o_tmp->filename = tmp;
47741+
47742+ insert_acl_obj_label(o_tmp, subj);
47743+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
47744+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
47745+ return -ENOMEM;
47746+
47747+ ret = copy_user_glob(o_tmp);
47748+ if (ret)
47749+ return ret;
47750+
47751+ if (o_tmp->nested) {
47752+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
47753+ if (IS_ERR(o_tmp->nested))
47754+ return PTR_ERR(o_tmp->nested);
47755+
47756+ /* insert into nested subject list */
47757+ o_tmp->nested->next = role->hash->first;
47758+ role->hash->first = o_tmp->nested;
47759+ }
47760+ }
47761+
47762+ return 0;
47763+}
47764+
47765+static __u32
47766+count_user_subjs(struct acl_subject_label *userp)
47767+{
47768+ struct acl_subject_label s_tmp;
47769+ __u32 num = 0;
47770+
47771+ while (userp) {
47772+ if (copy_from_user(&s_tmp, userp,
47773+ sizeof (struct acl_subject_label)))
47774+ break;
47775+
47776+ userp = s_tmp.prev;
47777+ /* do not count nested subjects against this count, since
47778+ they are not included in the hash table, but are
47779+ attached to objects. We have already counted
47780+ the subjects in userspace for the allocation
47781+ stack
47782+ */
47783+ if (!(s_tmp.mode & GR_NESTED))
47784+ num++;
47785+ }
47786+
47787+ return num;
47788+}
47789+
47790+static int
47791+copy_user_allowedips(struct acl_role_label *rolep)
47792+{
47793+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
47794+
47795+ ruserip = rolep->allowed_ips;
47796+
47797+ while (ruserip) {
47798+ rlast = rtmp;
47799+
47800+ if ((rtmp = (struct role_allowed_ip *)
47801+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
47802+ return -ENOMEM;
47803+
47804+ if (copy_from_user(rtmp, ruserip,
47805+ sizeof (struct role_allowed_ip)))
47806+ return -EFAULT;
47807+
47808+ ruserip = rtmp->prev;
47809+
47810+ if (!rlast) {
47811+ rtmp->prev = NULL;
47812+ rolep->allowed_ips = rtmp;
47813+ } else {
47814+ rlast->next = rtmp;
47815+ rtmp->prev = rlast;
47816+ }
47817+
47818+ if (!ruserip)
47819+ rtmp->next = NULL;
47820+ }
47821+
47822+ return 0;
47823+}
47824+
47825+static int
47826+copy_user_transitions(struct acl_role_label *rolep)
47827+{
47828+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
47829+
47830+ unsigned int len;
47831+ char *tmp;
47832+
47833+ rusertp = rolep->transitions;
47834+
47835+ while (rusertp) {
47836+ rlast = rtmp;
47837+
47838+ if ((rtmp = (struct role_transition *)
47839+ acl_alloc(sizeof (struct role_transition))) == NULL)
47840+ return -ENOMEM;
47841+
47842+ if (copy_from_user(rtmp, rusertp,
47843+ sizeof (struct role_transition)))
47844+ return -EFAULT;
47845+
47846+ rusertp = rtmp->prev;
47847+
47848+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
47849+
47850+ if (!len || len >= GR_SPROLE_LEN)
47851+ return -EINVAL;
47852+
47853+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47854+ return -ENOMEM;
47855+
47856+ if (copy_from_user(tmp, rtmp->rolename, len))
47857+ return -EFAULT;
47858+ tmp[len-1] = '\0';
47859+ rtmp->rolename = tmp;
47860+
47861+ if (!rlast) {
47862+ rtmp->prev = NULL;
47863+ rolep->transitions = rtmp;
47864+ } else {
47865+ rlast->next = rtmp;
47866+ rtmp->prev = rlast;
47867+ }
47868+
47869+ if (!rusertp)
47870+ rtmp->next = NULL;
47871+ }
47872+
47873+ return 0;
47874+}
47875+
47876+static struct acl_subject_label *
47877+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
47878+{
47879+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
47880+ unsigned int len;
47881+ char *tmp;
47882+ __u32 num_objs;
47883+ struct acl_ip_label **i_tmp, *i_utmp2;
47884+ struct gr_hash_struct ghash;
47885+ struct subject_map *subjmap;
47886+ unsigned int i_num;
47887+ int err;
47888+
47889+ s_tmp = lookup_subject_map(userp);
47890+
47891+ /* we've already copied this subject into the kernel, just return
47892+ the reference to it, and don't copy it over again
47893+ */
47894+ if (s_tmp)
47895+ return(s_tmp);
47896+
47897+ if ((s_tmp = (struct acl_subject_label *)
47898+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
47899+ return ERR_PTR(-ENOMEM);
47900+
47901+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
47902+ if (subjmap == NULL)
47903+ return ERR_PTR(-ENOMEM);
47904+
47905+ subjmap->user = userp;
47906+ subjmap->kernel = s_tmp;
47907+ insert_subj_map_entry(subjmap);
47908+
47909+ if (copy_from_user(s_tmp, userp,
47910+ sizeof (struct acl_subject_label)))
47911+ return ERR_PTR(-EFAULT);
47912+
47913+ len = strnlen_user(s_tmp->filename, PATH_MAX);
47914+
47915+ if (!len || len >= PATH_MAX)
47916+ return ERR_PTR(-EINVAL);
47917+
47918+ if ((tmp = (char *) acl_alloc(len)) == NULL)
47919+ return ERR_PTR(-ENOMEM);
47920+
47921+ if (copy_from_user(tmp, s_tmp->filename, len))
47922+ return ERR_PTR(-EFAULT);
47923+ tmp[len-1] = '\0';
47924+ s_tmp->filename = tmp;
47925+
47926+ if (!strcmp(s_tmp->filename, "/"))
47927+ role->root_label = s_tmp;
47928+
47929+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
47930+ return ERR_PTR(-EFAULT);
47931+
47932+ /* copy user and group transition tables */
47933+
47934+ if (s_tmp->user_trans_num) {
47935+ uid_t *uidlist;
47936+
47937+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
47938+ if (uidlist == NULL)
47939+ return ERR_PTR(-ENOMEM);
47940+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
47941+ return ERR_PTR(-EFAULT);
47942+
47943+ s_tmp->user_transitions = uidlist;
47944+ }
47945+
47946+ if (s_tmp->group_trans_num) {
47947+ gid_t *gidlist;
47948+
47949+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
47950+ if (gidlist == NULL)
47951+ return ERR_PTR(-ENOMEM);
47952+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
47953+ return ERR_PTR(-EFAULT);
47954+
47955+ s_tmp->group_transitions = gidlist;
47956+ }
47957+
47958+ /* set up object hash table */
47959+ num_objs = count_user_objs(ghash.first);
47960+
47961+ s_tmp->obj_hash_size = num_objs;
47962+ s_tmp->obj_hash =
47963+ (struct acl_object_label **)
47964+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
47965+
47966+ if (!s_tmp->obj_hash)
47967+ return ERR_PTR(-ENOMEM);
47968+
47969+ memset(s_tmp->obj_hash, 0,
47970+ s_tmp->obj_hash_size *
47971+ sizeof (struct acl_object_label *));
47972+
47973+ /* add in objects */
47974+ err = copy_user_objs(ghash.first, s_tmp, role);
47975+
47976+ if (err)
47977+ return ERR_PTR(err);
47978+
47979+ /* set pointer for parent subject */
47980+ if (s_tmp->parent_subject) {
47981+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
47982+
47983+ if (IS_ERR(s_tmp2))
47984+ return s_tmp2;
47985+
47986+ s_tmp->parent_subject = s_tmp2;
47987+ }
47988+
47989+ /* add in ip acls */
47990+
47991+ if (!s_tmp->ip_num) {
47992+ s_tmp->ips = NULL;
47993+ goto insert;
47994+ }
47995+
47996+ i_tmp =
47997+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
47998+ sizeof (struct acl_ip_label *));
47999+
48000+ if (!i_tmp)
48001+ return ERR_PTR(-ENOMEM);
48002+
48003+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48004+ *(i_tmp + i_num) =
48005+ (struct acl_ip_label *)
48006+ acl_alloc(sizeof (struct acl_ip_label));
48007+ if (!*(i_tmp + i_num))
48008+ return ERR_PTR(-ENOMEM);
48009+
48010+ if (copy_from_user
48011+ (&i_utmp2, s_tmp->ips + i_num,
48012+ sizeof (struct acl_ip_label *)))
48013+ return ERR_PTR(-EFAULT);
48014+
48015+ if (copy_from_user
48016+ (*(i_tmp + i_num), i_utmp2,
48017+ sizeof (struct acl_ip_label)))
48018+ return ERR_PTR(-EFAULT);
48019+
48020+ if ((*(i_tmp + i_num))->iface == NULL)
48021+ continue;
48022+
48023+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48024+ if (!len || len >= IFNAMSIZ)
48025+ return ERR_PTR(-EINVAL);
48026+ tmp = acl_alloc(len);
48027+ if (tmp == NULL)
48028+ return ERR_PTR(-ENOMEM);
48029+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48030+ return ERR_PTR(-EFAULT);
48031+ (*(i_tmp + i_num))->iface = tmp;
48032+ }
48033+
48034+ s_tmp->ips = i_tmp;
48035+
48036+insert:
48037+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48038+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48039+ return ERR_PTR(-ENOMEM);
48040+
48041+ return s_tmp;
48042+}
48043+
48044+static int
48045+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48046+{
48047+ struct acl_subject_label s_pre;
48048+ struct acl_subject_label * ret;
48049+ int err;
48050+
48051+ while (userp) {
48052+ if (copy_from_user(&s_pre, userp,
48053+ sizeof (struct acl_subject_label)))
48054+ return -EFAULT;
48055+
48056+ /* do not add nested subjects here, add
48057+ while parsing objects
48058+ */
48059+
48060+ if (s_pre.mode & GR_NESTED) {
48061+ userp = s_pre.prev;
48062+ continue;
48063+ }
48064+
48065+ ret = do_copy_user_subj(userp, role);
48066+
48067+ err = PTR_ERR(ret);
48068+ if (IS_ERR(ret))
48069+ return err;
48070+
48071+ insert_acl_subj_label(ret, role);
48072+
48073+ userp = s_pre.prev;
48074+ }
48075+
48076+ return 0;
48077+}
48078+
48079+static int
48080+copy_user_acl(struct gr_arg *arg)
48081+{
48082+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48083+ struct sprole_pw *sptmp;
48084+ struct gr_hash_struct *ghash;
48085+ uid_t *domainlist;
48086+ unsigned int r_num;
48087+ unsigned int len;
48088+ char *tmp;
48089+ int err = 0;
48090+ __u16 i;
48091+ __u32 num_subjs;
48092+
48093+ /* we need a default and kernel role */
48094+ if (arg->role_db.num_roles < 2)
48095+ return -EINVAL;
48096+
48097+ /* copy special role authentication info from userspace */
48098+
48099+ num_sprole_pws = arg->num_sprole_pws;
48100+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48101+
48102+ if (!acl_special_roles) {
48103+ err = -ENOMEM;
48104+ goto cleanup;
48105+ }
48106+
48107+ for (i = 0; i < num_sprole_pws; i++) {
48108+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48109+ if (!sptmp) {
48110+ err = -ENOMEM;
48111+ goto cleanup;
48112+ }
48113+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48114+ sizeof (struct sprole_pw))) {
48115+ err = -EFAULT;
48116+ goto cleanup;
48117+ }
48118+
48119+ len =
48120+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48121+
48122+ if (!len || len >= GR_SPROLE_LEN) {
48123+ err = -EINVAL;
48124+ goto cleanup;
48125+ }
48126+
48127+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48128+ err = -ENOMEM;
48129+ goto cleanup;
48130+ }
48131+
48132+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48133+ err = -EFAULT;
48134+ goto cleanup;
48135+ }
48136+ tmp[len-1] = '\0';
48137+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48138+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48139+#endif
48140+ sptmp->rolename = tmp;
48141+ acl_special_roles[i] = sptmp;
48142+ }
48143+
48144+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48145+
48146+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48147+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48148+
48149+ if (!r_tmp) {
48150+ err = -ENOMEM;
48151+ goto cleanup;
48152+ }
48153+
48154+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48155+ sizeof (struct acl_role_label *))) {
48156+ err = -EFAULT;
48157+ goto cleanup;
48158+ }
48159+
48160+ if (copy_from_user(r_tmp, r_utmp2,
48161+ sizeof (struct acl_role_label))) {
48162+ err = -EFAULT;
48163+ goto cleanup;
48164+ }
48165+
48166+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48167+
48168+		if (!len || len >= GR_SPROLE_LEN) {
48169+ err = -EINVAL;
48170+ goto cleanup;
48171+ }
48172+
48173+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48174+ err = -ENOMEM;
48175+ goto cleanup;
48176+ }
48177+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48178+ err = -EFAULT;
48179+ goto cleanup;
48180+ }
48181+ tmp[len-1] = '\0';
48182+ r_tmp->rolename = tmp;
48183+
48184+ if (!strcmp(r_tmp->rolename, "default")
48185+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48186+ default_role = r_tmp;
48187+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48188+ kernel_role = r_tmp;
48189+ }
48190+
48191+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48192+ err = -ENOMEM;
48193+ goto cleanup;
48194+ }
48195+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48196+ err = -EFAULT;
48197+ goto cleanup;
48198+ }
48199+
48200+ r_tmp->hash = ghash;
48201+
48202+ num_subjs = count_user_subjs(r_tmp->hash->first);
48203+
48204+ r_tmp->subj_hash_size = num_subjs;
48205+ r_tmp->subj_hash =
48206+ (struct acl_subject_label **)
48207+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48208+
48209+ if (!r_tmp->subj_hash) {
48210+ err = -ENOMEM;
48211+ goto cleanup;
48212+ }
48213+
48214+ err = copy_user_allowedips(r_tmp);
48215+ if (err)
48216+ goto cleanup;
48217+
48218+ /* copy domain info */
48219+ if (r_tmp->domain_children != NULL) {
48220+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48221+ if (domainlist == NULL) {
48222+ err = -ENOMEM;
48223+ goto cleanup;
48224+ }
48225+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48226+ err = -EFAULT;
48227+ goto cleanup;
48228+ }
48229+ r_tmp->domain_children = domainlist;
48230+ }
48231+
48232+ err = copy_user_transitions(r_tmp);
48233+ if (err)
48234+ goto cleanup;
48235+
48236+ memset(r_tmp->subj_hash, 0,
48237+ r_tmp->subj_hash_size *
48238+ sizeof (struct acl_subject_label *));
48239+
48240+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48241+
48242+ if (err)
48243+ goto cleanup;
48244+
48245+ /* set nested subject list to null */
48246+ r_tmp->hash->first = NULL;
48247+
48248+ insert_acl_role_label(r_tmp);
48249+ }
48250+
48251+ goto return_err;
48252+ cleanup:
48253+ free_variables();
48254+ return_err:
48255+ return err;
48256+
48257+}
48258+
48259+static int
48260+gracl_init(struct gr_arg *args)
48261+{
48262+ int error = 0;
48263+
48264+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48265+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48266+
48267+ if (init_variables(args)) {
48268+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48269+ error = -ENOMEM;
48270+ free_variables();
48271+ goto out;
48272+ }
48273+
48274+ error = copy_user_acl(args);
48275+ free_init_variables();
48276+ if (error) {
48277+ free_variables();
48278+ goto out;
48279+ }
48280+
48281+ if ((error = gr_set_acls(0))) {
48282+ free_variables();
48283+ goto out;
48284+ }
48285+
48286+ pax_open_kernel();
48287+ gr_status |= GR_READY;
48288+ pax_close_kernel();
48289+
48290+ out:
48291+ return error;
48292+}
48293+
48294+/* derived from glibc fnmatch() 0: match, 1: no match */
48295+
48296+static int
48297+glob_match(const char *p, const char *n)
48298+{
48299+ char c;
48300+
48301+ while ((c = *p++) != '\0') {
48302+ switch (c) {
48303+ case '?':
48304+ if (*n == '\0')
48305+ return 1;
48306+ else if (*n == '/')
48307+ return 1;
48308+ break;
48309+ case '\\':
48310+ if (*n != c)
48311+ return 1;
48312+ break;
48313+ case '*':
48314+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48315+ if (*n == '/')
48316+ return 1;
48317+ else if (c == '?') {
48318+ if (*n == '\0')
48319+ return 1;
48320+ else
48321+ ++n;
48322+ }
48323+ }
48324+ if (c == '\0') {
48325+ return 0;
48326+ } else {
48327+ const char *endp;
48328+
48329+ if ((endp = strchr(n, '/')) == NULL)
48330+ endp = n + strlen(n);
48331+
48332+ if (c == '[') {
48333+ for (--p; n < endp; ++n)
48334+ if (!glob_match(p, n))
48335+ return 0;
48336+ } else if (c == '/') {
48337+ while (*n != '\0' && *n != '/')
48338+ ++n;
48339+ if (*n == '/' && !glob_match(p, n + 1))
48340+ return 0;
48341+ } else {
48342+ for (--p; n < endp; ++n)
48343+ if (*n == c && !glob_match(p, n))
48344+ return 0;
48345+ }
48346+
48347+ return 1;
48348+ }
48349+ case '[':
48350+ {
48351+ int not;
48352+ char cold;
48353+
48354+ if (*n == '\0' || *n == '/')
48355+ return 1;
48356+
48357+ not = (*p == '!' || *p == '^');
48358+ if (not)
48359+ ++p;
48360+
48361+ c = *p++;
48362+ for (;;) {
48363+ unsigned char fn = (unsigned char)*n;
48364+
48365+ if (c == '\0')
48366+ return 1;
48367+ else {
48368+ if (c == fn)
48369+ goto matched;
48370+ cold = c;
48371+ c = *p++;
48372+
48373+ if (c == '-' && *p != ']') {
48374+ unsigned char cend = *p++;
48375+
48376+ if (cend == '\0')
48377+ return 1;
48378+
48379+ if (cold <= fn && fn <= cend)
48380+ goto matched;
48381+
48382+ c = *p++;
48383+ }
48384+ }
48385+
48386+ if (c == ']')
48387+ break;
48388+ }
48389+ if (!not)
48390+ return 1;
48391+ break;
48392+ matched:
48393+ while (c != ']') {
48394+ if (c == '\0')
48395+ return 1;
48396+
48397+ c = *p++;
48398+ }
48399+ if (not)
48400+ return 1;
48401+ }
48402+ break;
48403+ default:
48404+ if (c != *n)
48405+ return 1;
48406+ }
48407+
48408+ ++n;
48409+ }
48410+
48411+ if (*n == '\0')
48412+ return 0;
48413+
48414+ if (*n == '/')
48415+ return 0;
48416+
48417+ return 1;
48418+}
48419+
48420+static struct acl_object_label *
48421+chk_glob_label(struct acl_object_label *globbed,
48422+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48423+{
48424+ struct acl_object_label *tmp;
48425+
48426+ if (*path == NULL)
48427+ *path = gr_to_filename_nolock(dentry, mnt);
48428+
48429+ tmp = globbed;
48430+
48431+ while (tmp) {
48432+ if (!glob_match(tmp->filename, *path))
48433+ return tmp;
48434+ tmp = tmp->next;
48435+ }
48436+
48437+ return NULL;
48438+}
48439+
48440+static struct acl_object_label *
48441+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48442+ const ino_t curr_ino, const dev_t curr_dev,
48443+ const struct acl_subject_label *subj, char **path, const int checkglob)
48444+{
48445+ struct acl_subject_label *tmpsubj;
48446+ struct acl_object_label *retval;
48447+ struct acl_object_label *retval2;
48448+
48449+ tmpsubj = (struct acl_subject_label *) subj;
48450+ read_lock(&gr_inode_lock);
48451+ do {
48452+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48453+ if (retval) {
48454+ if (checkglob && retval->globbed) {
48455+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48456+ (struct vfsmount *)orig_mnt, path);
48457+ if (retval2)
48458+ retval = retval2;
48459+ }
48460+ break;
48461+ }
48462+ } while ((tmpsubj = tmpsubj->parent_subject));
48463+ read_unlock(&gr_inode_lock);
48464+
48465+ return retval;
48466+}
48467+
48468+static __inline__ struct acl_object_label *
48469+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48470+ const struct dentry *curr_dentry,
48471+ const struct acl_subject_label *subj, char **path, const int checkglob)
48472+{
48473+ int newglob = checkglob;
48474+
48475+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
48476+	   as we don't want a / * rule to match instead of the / object.
48477+	   don't do this for create lookups that call this function, though, since they're looking up
48478+	   on the parent and thus need globbing checks on all paths
48479+	*/
48480+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48481+ newglob = GR_NO_GLOB;
48482+
48483+ return __full_lookup(orig_dentry, orig_mnt,
48484+ curr_dentry->d_inode->i_ino,
48485+ __get_dev(curr_dentry), subj, path, newglob);
48486+}
48487+
48488+static struct acl_object_label *
48489+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48490+ const struct acl_subject_label *subj, char *path, const int checkglob)
48491+{
48492+ struct dentry *dentry = (struct dentry *) l_dentry;
48493+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48494+ struct acl_object_label *retval;
48495+
48496+ spin_lock(&dcache_lock);
48497+ spin_lock(&vfsmount_lock);
48498+
48499+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48500+#ifdef CONFIG_NET
48501+ mnt == sock_mnt ||
48502+#endif
48503+#ifdef CONFIG_HUGETLBFS
48504+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48505+#endif
48506+ /* ignore Eric Biederman */
48507+ IS_PRIVATE(l_dentry->d_inode))) {
48508+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48509+ goto out;
48510+ }
48511+
48512+ for (;;) {
48513+ if (dentry == real_root && mnt == real_root_mnt)
48514+ break;
48515+
48516+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48517+ if (mnt->mnt_parent == mnt)
48518+ break;
48519+
48520+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48521+ if (retval != NULL)
48522+ goto out;
48523+
48524+ dentry = mnt->mnt_mountpoint;
48525+ mnt = mnt->mnt_parent;
48526+ continue;
48527+ }
48528+
48529+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48530+ if (retval != NULL)
48531+ goto out;
48532+
48533+ dentry = dentry->d_parent;
48534+ }
48535+
48536+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48537+
48538+ if (retval == NULL)
48539+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48540+out:
48541+ spin_unlock(&vfsmount_lock);
48542+ spin_unlock(&dcache_lock);
48543+
48544+ BUG_ON(retval == NULL);
48545+
48546+ return retval;
48547+}
48548+
48549+static __inline__ struct acl_object_label *
48550+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48551+ const struct acl_subject_label *subj)
48552+{
48553+ char *path = NULL;
48554+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
48555+}
48556+
48557+static __inline__ struct acl_object_label *
48558+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48559+ const struct acl_subject_label *subj)
48560+{
48561+ char *path = NULL;
48562+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
48563+}
48564+
48565+static __inline__ struct acl_object_label *
48566+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48567+ const struct acl_subject_label *subj, char *path)
48568+{
48569+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
48570+}
48571+
48572+static struct acl_subject_label *
48573+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48574+ const struct acl_role_label *role)
48575+{
48576+ struct dentry *dentry = (struct dentry *) l_dentry;
48577+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48578+ struct acl_subject_label *retval;
48579+
48580+ spin_lock(&dcache_lock);
48581+ spin_lock(&vfsmount_lock);
48582+
48583+ for (;;) {
48584+ if (dentry == real_root && mnt == real_root_mnt)
48585+ break;
48586+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48587+ if (mnt->mnt_parent == mnt)
48588+ break;
48589+
48590+ read_lock(&gr_inode_lock);
48591+ retval =
48592+ lookup_acl_subj_label(dentry->d_inode->i_ino,
48593+ __get_dev(dentry), role);
48594+ read_unlock(&gr_inode_lock);
48595+ if (retval != NULL)
48596+ goto out;
48597+
48598+ dentry = mnt->mnt_mountpoint;
48599+ mnt = mnt->mnt_parent;
48600+ continue;
48601+ }
48602+
48603+ read_lock(&gr_inode_lock);
48604+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48605+ __get_dev(dentry), role);
48606+ read_unlock(&gr_inode_lock);
48607+ if (retval != NULL)
48608+ goto out;
48609+
48610+ dentry = dentry->d_parent;
48611+ }
48612+
48613+ read_lock(&gr_inode_lock);
48614+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
48615+ __get_dev(dentry), role);
48616+ read_unlock(&gr_inode_lock);
48617+
48618+ if (unlikely(retval == NULL)) {
48619+ read_lock(&gr_inode_lock);
48620+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
48621+ __get_dev(real_root), role);
48622+ read_unlock(&gr_inode_lock);
48623+ }
48624+out:
48625+ spin_unlock(&vfsmount_lock);
48626+ spin_unlock(&dcache_lock);
48627+
48628+ BUG_ON(retval == NULL);
48629+
48630+ return retval;
48631+}
48632+
48633+static void
48634+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
48635+{
48636+ struct task_struct *task = current;
48637+ const struct cred *cred = current_cred();
48638+
48639+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48640+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48641+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48642+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
48643+
48644+ return;
48645+}
48646+
48647+static void
48648+gr_log_learn_sysctl(const char *path, const __u32 mode)
48649+{
48650+ struct task_struct *task = current;
48651+ const struct cred *cred = current_cred();
48652+
48653+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
48654+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48655+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48656+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
48657+
48658+ return;
48659+}
48660+
48661+static void
48662+gr_log_learn_id_change(const char type, const unsigned int real,
48663+ const unsigned int effective, const unsigned int fs)
48664+{
48665+ struct task_struct *task = current;
48666+ const struct cred *cred = current_cred();
48667+
48668+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
48669+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
48670+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
48671+ type, real, effective, fs, &task->signal->saved_ip);
48672+
48673+ return;
48674+}
48675+
48676+__u32
48677+gr_check_link(const struct dentry * new_dentry,
48678+ const struct dentry * parent_dentry,
48679+ const struct vfsmount * parent_mnt,
48680+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
48681+{
48682+ struct acl_object_label *obj;
48683+ __u32 oldmode, newmode;
48684+ __u32 needmode;
48685+
48686+ if (unlikely(!(gr_status & GR_READY)))
48687+ return (GR_CREATE | GR_LINK);
48688+
48689+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
48690+ oldmode = obj->mode;
48691+
48692+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48693+ oldmode |= (GR_CREATE | GR_LINK);
48694+
48695+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
48696+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48697+ needmode |= GR_SETID | GR_AUDIT_SETID;
48698+
48699+ newmode =
48700+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48701+ oldmode | needmode);
48702+
48703+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
48704+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
48705+ GR_INHERIT | GR_AUDIT_INHERIT);
48706+
48707+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
48708+ goto bad;
48709+
48710+ if ((oldmode & needmode) != needmode)
48711+ goto bad;
48712+
48713+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
48714+ if ((newmode & needmode) != needmode)
48715+ goto bad;
48716+
48717+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
48718+ return newmode;
48719+bad:
48720+ needmode = oldmode;
48721+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
48722+ needmode |= GR_SETID;
48723+
48724+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48725+ gr_log_learn(old_dentry, old_mnt, needmode);
48726+ return (GR_CREATE | GR_LINK);
48727+ } else if (newmode & GR_SUPPRESS)
48728+ return GR_SUPPRESS;
48729+ else
48730+ return 0;
48731+}
48732+
48733+__u32
48734+gr_search_file(const struct dentry * dentry, const __u32 mode,
48735+ const struct vfsmount * mnt)
48736+{
48737+ __u32 retval = mode;
48738+ struct acl_subject_label *curracl;
48739+ struct acl_object_label *currobj;
48740+
48741+ if (unlikely(!(gr_status & GR_READY)))
48742+ return (mode & ~GR_AUDITS);
48743+
48744+ curracl = current->acl;
48745+
48746+ currobj = chk_obj_label(dentry, mnt, curracl);
48747+ retval = currobj->mode & mode;
48748+
48749+ /* if we're opening a specified transfer file for writing
48750+ (e.g. /dev/initctl), then transfer our role to init
48751+ */
48752+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
48753+ current->role->roletype & GR_ROLE_PERSIST)) {
48754+ struct task_struct *task = init_pid_ns.child_reaper;
48755+
48756+ if (task->role != current->role) {
48757+ task->acl_sp_role = 0;
48758+ task->acl_role_id = current->acl_role_id;
48759+ task->role = current->role;
48760+ rcu_read_lock();
48761+ read_lock(&grsec_exec_file_lock);
48762+ gr_apply_subject_to_task(task);
48763+ read_unlock(&grsec_exec_file_lock);
48764+ rcu_read_unlock();
48765+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
48766+ }
48767+ }
48768+
48769+ if (unlikely
48770+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
48771+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
48772+ __u32 new_mode = mode;
48773+
48774+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48775+
48776+ retval = new_mode;
48777+
48778+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
48779+ new_mode |= GR_INHERIT;
48780+
48781+ if (!(mode & GR_NOLEARN))
48782+ gr_log_learn(dentry, mnt, new_mode);
48783+ }
48784+
48785+ return retval;
48786+}
48787+
48788+__u32
48789+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
48790+ const struct vfsmount * mnt, const __u32 mode)
48791+{
48792+ struct name_entry *match;
48793+ struct acl_object_label *matchpo;
48794+ struct acl_subject_label *curracl;
48795+ char *path;
48796+ __u32 retval;
48797+
48798+ if (unlikely(!(gr_status & GR_READY)))
48799+ return (mode & ~GR_AUDITS);
48800+
48801+ preempt_disable();
48802+ path = gr_to_filename_rbac(new_dentry, mnt);
48803+ match = lookup_name_entry_create(path);
48804+
48805+ if (!match)
48806+ goto check_parent;
48807+
48808+ curracl = current->acl;
48809+
48810+ read_lock(&gr_inode_lock);
48811+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
48812+ read_unlock(&gr_inode_lock);
48813+
48814+ if (matchpo) {
48815+ if ((matchpo->mode & mode) !=
48816+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
48817+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
48818+ __u32 new_mode = mode;
48819+
48820+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48821+
48822+ gr_log_learn(new_dentry, mnt, new_mode);
48823+
48824+ preempt_enable();
48825+ return new_mode;
48826+ }
48827+ preempt_enable();
48828+ return (matchpo->mode & mode);
48829+ }
48830+
48831+ check_parent:
48832+ curracl = current->acl;
48833+
48834+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
48835+ retval = matchpo->mode & mode;
48836+
48837+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
48838+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
48839+ __u32 new_mode = mode;
48840+
48841+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48842+
48843+ gr_log_learn(new_dentry, mnt, new_mode);
48844+ preempt_enable();
48845+ return new_mode;
48846+ }
48847+
48848+ preempt_enable();
48849+ return retval;
48850+}
48851+
48852+int
48853+gr_check_hidden_task(const struct task_struct *task)
48854+{
48855+ if (unlikely(!(gr_status & GR_READY)))
48856+ return 0;
48857+
48858+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
48859+ return 1;
48860+
48861+ return 0;
48862+}
48863+
48864+int
48865+gr_check_protected_task(const struct task_struct *task)
48866+{
48867+ if (unlikely(!(gr_status & GR_READY) || !task))
48868+ return 0;
48869+
48870+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48871+ task->acl != current->acl)
48872+ return 1;
48873+
48874+ return 0;
48875+}
48876+
48877+int
48878+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
48879+{
48880+ struct task_struct *p;
48881+ int ret = 0;
48882+
48883+ if (unlikely(!(gr_status & GR_READY) || !pid))
48884+ return ret;
48885+
48886+ read_lock(&tasklist_lock);
48887+ do_each_pid_task(pid, type, p) {
48888+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
48889+ p->acl != current->acl) {
48890+ ret = 1;
48891+ goto out;
48892+ }
48893+ } while_each_pid_task(pid, type, p);
48894+out:
48895+ read_unlock(&tasklist_lock);
48896+
48897+ return ret;
48898+}
48899+
48900+void
48901+gr_copy_label(struct task_struct *tsk)
48902+{
48903+ tsk->signal->used_accept = 0;
48904+ tsk->acl_sp_role = 0;
48905+ tsk->acl_role_id = current->acl_role_id;
48906+ tsk->acl = current->acl;
48907+ tsk->role = current->role;
48908+ tsk->signal->curr_ip = current->signal->curr_ip;
48909+ tsk->signal->saved_ip = current->signal->saved_ip;
48910+ if (current->exec_file)
48911+ get_file(current->exec_file);
48912+ tsk->exec_file = current->exec_file;
48913+ tsk->is_writable = current->is_writable;
48914+ if (unlikely(current->signal->used_accept)) {
48915+ current->signal->curr_ip = 0;
48916+ current->signal->saved_ip = 0;
48917+ }
48918+
48919+ return;
48920+}
48921+
48922+static void
48923+gr_set_proc_res(struct task_struct *task)
48924+{
48925+ struct acl_subject_label *proc;
48926+ unsigned short i;
48927+
48928+ proc = task->acl;
48929+
48930+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
48931+ return;
48932+
48933+ for (i = 0; i < RLIM_NLIMITS; i++) {
48934+ if (!(proc->resmask & (1 << i)))
48935+ continue;
48936+
48937+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
48938+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
48939+ }
48940+
48941+ return;
48942+}
48943+
48944+extern int __gr_process_user_ban(struct user_struct *user);
48945+
48946+int
48947+gr_check_user_change(int real, int effective, int fs)
48948+{
48949+ unsigned int i;
48950+ __u16 num;
48951+ uid_t *uidlist;
48952+ int curuid;
48953+ int realok = 0;
48954+ int effectiveok = 0;
48955+ int fsok = 0;
48956+
48957+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
48958+ struct user_struct *user;
48959+
48960+ if (real == -1)
48961+ goto skipit;
48962+
48963+ user = find_user(real);
48964+ if (user == NULL)
48965+ goto skipit;
48966+
48967+ if (__gr_process_user_ban(user)) {
48968+ /* for find_user */
48969+ free_uid(user);
48970+ return 1;
48971+ }
48972+
48973+ /* for find_user */
48974+ free_uid(user);
48975+
48976+skipit:
48977+#endif
48978+
48979+ if (unlikely(!(gr_status & GR_READY)))
48980+ return 0;
48981+
48982+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48983+ gr_log_learn_id_change('u', real, effective, fs);
48984+
48985+ num = current->acl->user_trans_num;
48986+ uidlist = current->acl->user_transitions;
48987+
48988+ if (uidlist == NULL)
48989+ return 0;
48990+
48991+ if (real == -1)
48992+ realok = 1;
48993+ if (effective == -1)
48994+ effectiveok = 1;
48995+ if (fs == -1)
48996+ fsok = 1;
48997+
48998+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
48999+ for (i = 0; i < num; i++) {
49000+ curuid = (int)uidlist[i];
49001+ if (real == curuid)
49002+ realok = 1;
49003+ if (effective == curuid)
49004+ effectiveok = 1;
49005+ if (fs == curuid)
49006+ fsok = 1;
49007+ }
49008+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49009+ for (i = 0; i < num; i++) {
49010+ curuid = (int)uidlist[i];
49011+ if (real == curuid)
49012+ break;
49013+ if (effective == curuid)
49014+ break;
49015+ if (fs == curuid)
49016+ break;
49017+ }
49018+ /* not in deny list */
49019+ if (i == num) {
49020+ realok = 1;
49021+ effectiveok = 1;
49022+ fsok = 1;
49023+ }
49024+ }
49025+
49026+ if (realok && effectiveok && fsok)
49027+ return 0;
49028+ else {
49029+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49030+ return 1;
49031+ }
49032+}
49033+
49034+int
49035+gr_check_group_change(int real, int effective, int fs)
49036+{
49037+ unsigned int i;
49038+ __u16 num;
49039+ gid_t *gidlist;
49040+ int curgid;
49041+ int realok = 0;
49042+ int effectiveok = 0;
49043+ int fsok = 0;
49044+
49045+ if (unlikely(!(gr_status & GR_READY)))
49046+ return 0;
49047+
49048+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49049+ gr_log_learn_id_change('g', real, effective, fs);
49050+
49051+ num = current->acl->group_trans_num;
49052+ gidlist = current->acl->group_transitions;
49053+
49054+ if (gidlist == NULL)
49055+ return 0;
49056+
49057+ if (real == -1)
49058+ realok = 1;
49059+ if (effective == -1)
49060+ effectiveok = 1;
49061+ if (fs == -1)
49062+ fsok = 1;
49063+
49064+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49065+ for (i = 0; i < num; i++) {
49066+ curgid = (int)gidlist[i];
49067+ if (real == curgid)
49068+ realok = 1;
49069+ if (effective == curgid)
49070+ effectiveok = 1;
49071+ if (fs == curgid)
49072+ fsok = 1;
49073+ }
49074+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49075+ for (i = 0; i < num; i++) {
49076+ curgid = (int)gidlist[i];
49077+ if (real == curgid)
49078+ break;
49079+ if (effective == curgid)
49080+ break;
49081+ if (fs == curgid)
49082+ break;
49083+ }
49084+ /* not in deny list */
49085+ if (i == num) {
49086+ realok = 1;
49087+ effectiveok = 1;
49088+ fsok = 1;
49089+ }
49090+ }
49091+
49092+ if (realok && effectiveok && fsok)
49093+ return 0;
49094+ else {
49095+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49096+ return 1;
49097+ }
49098+}
49099+
49100+void
49101+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49102+{
49103+ struct acl_role_label *role = task->role;
49104+ struct acl_subject_label *subj = NULL;
49105+ struct acl_object_label *obj;
49106+ struct file *filp;
49107+
49108+ if (unlikely(!(gr_status & GR_READY)))
49109+ return;
49110+
49111+ filp = task->exec_file;
49112+
49113+ /* kernel process, we'll give them the kernel role */
49114+ if (unlikely(!filp)) {
49115+ task->role = kernel_role;
49116+ task->acl = kernel_role->root_label;
49117+ return;
49118+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49119+ role = lookup_acl_role_label(task, uid, gid);
49120+
49121+ /* perform subject lookup in possibly new role
49122+ we can use this result below in the case where role == task->role
49123+ */
49124+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49125+
49126+	/* if we changed uid/gid but ended up in the same role
49127+	   and are using inheritance, don't lose the inherited subject.
49128+	   if the current subject is other than what a normal lookup
49129+	   would result in, we arrived via inheritance; don't
49130+	   lose that subject
49131+	 */
49132+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49133+ (subj == task->acl)))
49134+ task->acl = subj;
49135+
49136+ task->role = role;
49137+
49138+ task->is_writable = 0;
49139+
49140+ /* ignore additional mmap checks for processes that are writable
49141+ by the default ACL */
49142+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49143+ if (unlikely(obj->mode & GR_WRITE))
49144+ task->is_writable = 1;
49145+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49146+ if (unlikely(obj->mode & GR_WRITE))
49147+ task->is_writable = 1;
49148+
49149+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49150+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49151+#endif
49152+
49153+ gr_set_proc_res(task);
49154+
49155+ return;
49156+}
49157+
49158+int
49159+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49160+ const int unsafe_share)
49161+{
49162+ struct task_struct *task = current;
49163+ struct acl_subject_label *newacl;
49164+ struct acl_object_label *obj;
49165+ __u32 retmode;
49166+
49167+ if (unlikely(!(gr_status & GR_READY)))
49168+ return 0;
49169+
49170+ newacl = chk_subj_label(dentry, mnt, task->role);
49171+
49172+ task_lock(task);
49173+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49174+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49175+ !(task->role->roletype & GR_ROLE_GOD) &&
49176+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49177+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49178+ task_unlock(task);
49179+ if (unsafe_share)
49180+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49181+ else
49182+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49183+ return -EACCES;
49184+ }
49185+ task_unlock(task);
49186+
49187+ obj = chk_obj_label(dentry, mnt, task->acl);
49188+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49189+
49190+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49191+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49192+ if (obj->nested)
49193+ task->acl = obj->nested;
49194+ else
49195+ task->acl = newacl;
49196+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49197+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49198+
49199+ task->is_writable = 0;
49200+
49201+ /* ignore additional mmap checks for processes that are writable
49202+ by the default ACL */
49203+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49204+ if (unlikely(obj->mode & GR_WRITE))
49205+ task->is_writable = 1;
49206+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49207+ if (unlikely(obj->mode & GR_WRITE))
49208+ task->is_writable = 1;
49209+
49210+ gr_set_proc_res(task);
49211+
49212+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49213+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49214+#endif
49215+ return 0;
49216+}
49217+
49218+/* always called with valid inodev ptr */
49219+static void
49220+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49221+{
49222+ struct acl_object_label *matchpo;
49223+ struct acl_subject_label *matchps;
49224+ struct acl_subject_label *subj;
49225+ struct acl_role_label *role;
49226+ unsigned int x;
49227+
49228+ FOR_EACH_ROLE_START(role)
49229+ FOR_EACH_SUBJECT_START(role, subj, x)
49230+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49231+ matchpo->mode |= GR_DELETED;
49232+ FOR_EACH_SUBJECT_END(subj,x)
49233+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49234+ if (subj->inode == ino && subj->device == dev)
49235+ subj->mode |= GR_DELETED;
49236+ FOR_EACH_NESTED_SUBJECT_END(subj)
49237+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49238+ matchps->mode |= GR_DELETED;
49239+ FOR_EACH_ROLE_END(role)
49240+
49241+ inodev->nentry->deleted = 1;
49242+
49243+ return;
49244+}
49245+
49246+void
49247+gr_handle_delete(const ino_t ino, const dev_t dev)
49248+{
49249+ struct inodev_entry *inodev;
49250+
49251+ if (unlikely(!(gr_status & GR_READY)))
49252+ return;
49253+
49254+ write_lock(&gr_inode_lock);
49255+ inodev = lookup_inodev_entry(ino, dev);
49256+ if (inodev != NULL)
49257+ do_handle_delete(inodev, ino, dev);
49258+ write_unlock(&gr_inode_lock);
49259+
49260+ return;
49261+}
49262+
49263+static void
49264+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49265+ const ino_t newinode, const dev_t newdevice,
49266+ struct acl_subject_label *subj)
49267+{
49268+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49269+ struct acl_object_label *match;
49270+
49271+ match = subj->obj_hash[index];
49272+
49273+ while (match && (match->inode != oldinode ||
49274+ match->device != olddevice ||
49275+ !(match->mode & GR_DELETED)))
49276+ match = match->next;
49277+
49278+ if (match && (match->inode == oldinode)
49279+ && (match->device == olddevice)
49280+ && (match->mode & GR_DELETED)) {
49281+ if (match->prev == NULL) {
49282+ subj->obj_hash[index] = match->next;
49283+ if (match->next != NULL)
49284+ match->next->prev = NULL;
49285+ } else {
49286+ match->prev->next = match->next;
49287+ if (match->next != NULL)
49288+ match->next->prev = match->prev;
49289+ }
49290+ match->prev = NULL;
49291+ match->next = NULL;
49292+ match->inode = newinode;
49293+ match->device = newdevice;
49294+ match->mode &= ~GR_DELETED;
49295+
49296+ insert_acl_obj_label(match, subj);
49297+ }
49298+
49299+ return;
49300+}
49301+
49302+static void
49303+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49304+ const ino_t newinode, const dev_t newdevice,
49305+ struct acl_role_label *role)
49306+{
49307+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49308+ struct acl_subject_label *match;
49309+
49310+ match = role->subj_hash[index];
49311+
49312+ while (match && (match->inode != oldinode ||
49313+ match->device != olddevice ||
49314+ !(match->mode & GR_DELETED)))
49315+ match = match->next;
49316+
49317+ if (match && (match->inode == oldinode)
49318+ && (match->device == olddevice)
49319+ && (match->mode & GR_DELETED)) {
49320+ if (match->prev == NULL) {
49321+ role->subj_hash[index] = match->next;
49322+ if (match->next != NULL)
49323+ match->next->prev = NULL;
49324+ } else {
49325+ match->prev->next = match->next;
49326+ if (match->next != NULL)
49327+ match->next->prev = match->prev;
49328+ }
49329+ match->prev = NULL;
49330+ match->next = NULL;
49331+ match->inode = newinode;
49332+ match->device = newdevice;
49333+ match->mode &= ~GR_DELETED;
49334+
49335+ insert_acl_subj_label(match, role);
49336+ }
49337+
49338+ return;
49339+}
49340+
49341+static void
49342+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49343+ const ino_t newinode, const dev_t newdevice)
49344+{
49345+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49346+ struct inodev_entry *match;
49347+
49348+ match = inodev_set.i_hash[index];
49349+
49350+ while (match && (match->nentry->inode != oldinode ||
49351+ match->nentry->device != olddevice || !match->nentry->deleted))
49352+ match = match->next;
49353+
49354+ if (match && (match->nentry->inode == oldinode)
49355+ && (match->nentry->device == olddevice) &&
49356+ match->nentry->deleted) {
49357+ if (match->prev == NULL) {
49358+ inodev_set.i_hash[index] = match->next;
49359+ if (match->next != NULL)
49360+ match->next->prev = NULL;
49361+ } else {
49362+ match->prev->next = match->next;
49363+ if (match->next != NULL)
49364+ match->next->prev = match->prev;
49365+ }
49366+ match->prev = NULL;
49367+ match->next = NULL;
49368+ match->nentry->inode = newinode;
49369+ match->nentry->device = newdevice;
49370+ match->nentry->deleted = 0;
49371+
49372+ insert_inodev_entry(match);
49373+ }
49374+
49375+ return;
49376+}
49377+
49378+static void
49379+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49380+ const struct vfsmount *mnt)
49381+{
49382+ struct acl_subject_label *subj;
49383+ struct acl_role_label *role;
49384+ unsigned int x;
49385+ ino_t inode = dentry->d_inode->i_ino;
49386+ dev_t dev = __get_dev(dentry);
49387+
49388+ FOR_EACH_ROLE_START(role)
49389+ update_acl_subj_label(matchn->inode, matchn->device,
49390+ inode, dev, role);
49391+
49392+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49393+		if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
49394+ subj->inode = inode;
49395+ subj->device = dev;
49396+ }
49397+ FOR_EACH_NESTED_SUBJECT_END(subj)
49398+ FOR_EACH_SUBJECT_START(role, subj, x)
49399+ update_acl_obj_label(matchn->inode, matchn->device,
49400+ inode, dev, subj);
49401+ FOR_EACH_SUBJECT_END(subj,x)
49402+ FOR_EACH_ROLE_END(role)
49403+
49404+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49405+
49406+ return;
49407+}
49408+
49409+void
49410+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49411+{
49412+ struct name_entry *matchn;
49413+
49414+ if (unlikely(!(gr_status & GR_READY)))
49415+ return;
49416+
49417+ preempt_disable();
49418+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49419+
49420+ if (unlikely((unsigned long)matchn)) {
49421+ write_lock(&gr_inode_lock);
49422+ do_handle_create(matchn, dentry, mnt);
49423+ write_unlock(&gr_inode_lock);
49424+ }
49425+ preempt_enable();
49426+
49427+ return;
49428+}
49429+
49430+void
49431+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49432+ struct dentry *old_dentry,
49433+ struct dentry *new_dentry,
49434+ struct vfsmount *mnt, const __u8 replace)
49435+{
49436+ struct name_entry *matchn;
49437+ struct inodev_entry *inodev;
49438+ ino_t oldinode = old_dentry->d_inode->i_ino;
49439+ dev_t olddev = __get_dev(old_dentry);
49440+
49441+ /* vfs_rename swaps the name and parent link for old_dentry and
49442+ new_dentry
49443+ at this point, old_dentry has the new name, parent link, and inode
49444+ for the renamed file
49445+ if a file is being replaced by a rename, new_dentry has the inode
49446+ and name for the replaced file
49447+ */
49448+
49449+ if (unlikely(!(gr_status & GR_READY)))
49450+ return;
49451+
49452+ preempt_disable();
49453+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49454+
49455+ /* we wouldn't have to check d_inode if it weren't for
49456+ NFS silly-renaming
49457+ */
49458+
49459+ write_lock(&gr_inode_lock);
49460+ if (unlikely(replace && new_dentry->d_inode)) {
49461+ ino_t newinode = new_dentry->d_inode->i_ino;
49462+ dev_t newdev = __get_dev(new_dentry);
49463+ inodev = lookup_inodev_entry(newinode, newdev);
49464+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49465+ do_handle_delete(inodev, newinode, newdev);
49466+ }
49467+
49468+ inodev = lookup_inodev_entry(oldinode, olddev);
49469+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49470+ do_handle_delete(inodev, oldinode, olddev);
49471+
49472+ if (unlikely((unsigned long)matchn))
49473+ do_handle_create(matchn, old_dentry, mnt);
49474+
49475+ write_unlock(&gr_inode_lock);
49476+ preempt_enable();
49477+
49478+ return;
49479+}
49480+
49481+static int
49482+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49483+ unsigned char **sum)
49484+{
49485+ struct acl_role_label *r;
49486+ struct role_allowed_ip *ipp;
49487+ struct role_transition *trans;
49488+ unsigned int i;
49489+ int found = 0;
49490+ u32 curr_ip = current->signal->curr_ip;
49491+
49492+ current->signal->saved_ip = curr_ip;
49493+
49494+ /* check transition table */
49495+
49496+ for (trans = current->role->transitions; trans; trans = trans->next) {
49497+ if (!strcmp(rolename, trans->rolename)) {
49498+ found = 1;
49499+ break;
49500+ }
49501+ }
49502+
49503+ if (!found)
49504+ return 0;
49505+
49506+ /* handle special roles that do not require authentication
49507+ and check ip */
49508+
49509+ FOR_EACH_ROLE_START(r)
49510+ if (!strcmp(rolename, r->rolename) &&
49511+ (r->roletype & GR_ROLE_SPECIAL)) {
49512+ found = 0;
49513+ if (r->allowed_ips != NULL) {
49514+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49515+ if ((ntohl(curr_ip) & ipp->netmask) ==
49516+ (ntohl(ipp->addr) & ipp->netmask))
49517+ found = 1;
49518+ }
49519+ } else
49520+ found = 2;
49521+ if (!found)
49522+ return 0;
49523+
49524+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49525+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49526+ *salt = NULL;
49527+ *sum = NULL;
49528+ return 1;
49529+ }
49530+ }
49531+ FOR_EACH_ROLE_END(r)
49532+
49533+ for (i = 0; i < num_sprole_pws; i++) {
49534+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49535+ *salt = acl_special_roles[i]->salt;
49536+ *sum = acl_special_roles[i]->sum;
49537+ return 1;
49538+ }
49539+ }
49540+
49541+ return 0;
49542+}
49543+
49544+static void
49545+assign_special_role(char *rolename)
49546+{
49547+ struct acl_object_label *obj;
49548+ struct acl_role_label *r;
49549+ struct acl_role_label *assigned = NULL;
49550+ struct task_struct *tsk;
49551+ struct file *filp;
49552+
49553+ FOR_EACH_ROLE_START(r)
49554+ if (!strcmp(rolename, r->rolename) &&
49555+ (r->roletype & GR_ROLE_SPECIAL)) {
49556+ assigned = r;
49557+ break;
49558+ }
49559+ FOR_EACH_ROLE_END(r)
49560+
49561+ if (!assigned)
49562+ return;
49563+
49564+ read_lock(&tasklist_lock);
49565+ read_lock(&grsec_exec_file_lock);
49566+
49567+ tsk = current->real_parent;
49568+ if (tsk == NULL)
49569+ goto out_unlock;
49570+
49571+ filp = tsk->exec_file;
49572+ if (filp == NULL)
49573+ goto out_unlock;
49574+
49575+ tsk->is_writable = 0;
49576+
49577+ tsk->acl_sp_role = 1;
49578+ tsk->acl_role_id = ++acl_sp_role_value;
49579+ tsk->role = assigned;
49580+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
49581+
49582+ /* ignore additional mmap checks for processes that are writable
49583+ by the default ACL */
49584+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49585+ if (unlikely(obj->mode & GR_WRITE))
49586+ tsk->is_writable = 1;
49587+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
49588+ if (unlikely(obj->mode & GR_WRITE))
49589+ tsk->is_writable = 1;
49590+
49591+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49592+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
49593+#endif
49594+
49595+out_unlock:
49596+ read_unlock(&grsec_exec_file_lock);
49597+ read_unlock(&tasklist_lock);
49598+ return;
49599+}
49600+
49601+int gr_check_secure_terminal(struct task_struct *task)
49602+{
49603+ struct task_struct *p, *p2, *p3;
49604+ struct files_struct *files;
49605+ struct fdtable *fdt;
49606+ struct file *our_file = NULL, *file;
49607+ int i;
49608+
49609+ if (task->signal->tty == NULL)
49610+ return 1;
49611+
49612+ files = get_files_struct(task);
49613+ if (files != NULL) {
49614+ rcu_read_lock();
49615+ fdt = files_fdtable(files);
49616+ for (i=0; i < fdt->max_fds; i++) {
49617+ file = fcheck_files(files, i);
49618+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
49619+ get_file(file);
49620+ our_file = file;
49621+ }
49622+ }
49623+ rcu_read_unlock();
49624+ put_files_struct(files);
49625+ }
49626+
49627+ if (our_file == NULL)
49628+ return 1;
49629+
49630+ read_lock(&tasklist_lock);
49631+ do_each_thread(p2, p) {
49632+ files = get_files_struct(p);
49633+ if (files == NULL ||
49634+ (p->signal && p->signal->tty == task->signal->tty)) {
49635+ if (files != NULL)
49636+ put_files_struct(files);
49637+ continue;
49638+ }
49639+ rcu_read_lock();
49640+ fdt = files_fdtable(files);
49641+ for (i=0; i < fdt->max_fds; i++) {
49642+ file = fcheck_files(files, i);
49643+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
49644+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
49645+ p3 = task;
49646+ while (p3->pid > 0) {
49647+ if (p3 == p)
49648+ break;
49649+ p3 = p3->real_parent;
49650+ }
49651+ if (p3 == p)
49652+ break;
49653+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
49654+ gr_handle_alertkill(p);
49655+ rcu_read_unlock();
49656+ put_files_struct(files);
49657+ read_unlock(&tasklist_lock);
49658+ fput(our_file);
49659+ return 0;
49660+ }
49661+ }
49662+ rcu_read_unlock();
49663+ put_files_struct(files);
49664+ } while_each_thread(p2, p);
49665+ read_unlock(&tasklist_lock);
49666+
49667+ fput(our_file);
49668+ return 1;
49669+}
49670+
49671+ssize_t
49672+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
49673+{
49674+ struct gr_arg_wrapper uwrap;
49675+ unsigned char *sprole_salt = NULL;
49676+ unsigned char *sprole_sum = NULL;
49677+ int error = sizeof (struct gr_arg_wrapper);
49678+ int error2 = 0;
49679+
49680+ mutex_lock(&gr_dev_mutex);
49681+
49682+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
49683+ error = -EPERM;
49684+ goto out;
49685+ }
49686+
49687+ if (count != sizeof (struct gr_arg_wrapper)) {
49688+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
49689+ error = -EINVAL;
49690+ goto out;
49691+ }
49692+
49693+
49694+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
49695+ gr_auth_expires = 0;
49696+ gr_auth_attempts = 0;
49697+ }
49698+
49699+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
49700+ error = -EFAULT;
49701+ goto out;
49702+ }
49703+
49704+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
49705+ error = -EINVAL;
49706+ goto out;
49707+ }
49708+
49709+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
49710+ error = -EFAULT;
49711+ goto out;
49712+ }
49713+
49714+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49715+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49716+ time_after(gr_auth_expires, get_seconds())) {
49717+ error = -EBUSY;
49718+ goto out;
49719+ }
49720+
49721+	/* if a non-root user is trying to do anything other than use a special role,
49722+	   do not attempt authentication and do not count towards authentication
49723+	   locking
49724+	 */
49725+
49726+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
49727+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
49728+ current_uid()) {
49729+ error = -EPERM;
49730+ goto out;
49731+ }
49732+
49733+ /* ensure pw and special role name are null terminated */
49734+
49735+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
49736+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
49737+
49738+	/* Okay.
49739+	 * We have enough of the argument structure (we have yet
49740+	 * to copy_from_user the tables themselves). Copy the tables
49741+	 * only if we need them, i.e. for loading operations. */
49742+
49743+ switch (gr_usermode->mode) {
49744+ case GR_STATUS:
49745+ if (gr_status & GR_READY) {
49746+ error = 1;
49747+ if (!gr_check_secure_terminal(current))
49748+ error = 3;
49749+ } else
49750+ error = 2;
49751+ goto out;
49752+ case GR_SHUTDOWN:
49753+ if ((gr_status & GR_READY)
49754+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49755+ pax_open_kernel();
49756+ gr_status &= ~GR_READY;
49757+ pax_close_kernel();
49758+
49759+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
49760+ free_variables();
49761+ memset(gr_usermode, 0, sizeof (struct gr_arg));
49762+ memset(gr_system_salt, 0, GR_SALT_LEN);
49763+ memset(gr_system_sum, 0, GR_SHA_LEN);
49764+ } else if (gr_status & GR_READY) {
49765+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
49766+ error = -EPERM;
49767+ } else {
49768+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
49769+ error = -EAGAIN;
49770+ }
49771+ break;
49772+ case GR_ENABLE:
49773+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
49774+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
49775+ else {
49776+ if (gr_status & GR_READY)
49777+ error = -EAGAIN;
49778+ else
49779+ error = error2;
49780+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
49781+ }
49782+ break;
49783+ case GR_RELOAD:
49784+ if (!(gr_status & GR_READY)) {
49785+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
49786+ error = -EAGAIN;
49787+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49788+ lock_kernel();
49789+
49790+ pax_open_kernel();
49791+ gr_status &= ~GR_READY;
49792+ pax_close_kernel();
49793+
49794+ free_variables();
49795+ if (!(error2 = gracl_init(gr_usermode))) {
49796+ unlock_kernel();
49797+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
49798+ } else {
49799+ unlock_kernel();
49800+ error = error2;
49801+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49802+ }
49803+ } else {
49804+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
49805+ error = -EPERM;
49806+ }
49807+ break;
49808+ case GR_SEGVMOD:
49809+ if (unlikely(!(gr_status & GR_READY))) {
49810+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
49811+ error = -EAGAIN;
49812+ break;
49813+ }
49814+
49815+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
49816+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
49817+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
49818+ struct acl_subject_label *segvacl;
49819+ segvacl =
49820+ lookup_acl_subj_label(gr_usermode->segv_inode,
49821+ gr_usermode->segv_device,
49822+ current->role);
49823+ if (segvacl) {
49824+ segvacl->crashes = 0;
49825+ segvacl->expires = 0;
49826+ }
49827+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
49828+ gr_remove_uid(gr_usermode->segv_uid);
49829+ }
49830+ } else {
49831+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
49832+ error = -EPERM;
49833+ }
49834+ break;
49835+ case GR_SPROLE:
49836+ case GR_SPROLEPAM:
49837+ if (unlikely(!(gr_status & GR_READY))) {
49838+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
49839+ error = -EAGAIN;
49840+ break;
49841+ }
49842+
49843+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
49844+ current->role->expires = 0;
49845+ current->role->auth_attempts = 0;
49846+ }
49847+
49848+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
49849+ time_after(current->role->expires, get_seconds())) {
49850+ error = -EBUSY;
49851+ goto out;
49852+ }
49853+
49854+ if (lookup_special_role_auth
49855+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
49856+ && ((!sprole_salt && !sprole_sum)
49857+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
49858+ char *p = "";
49859+ assign_special_role(gr_usermode->sp_role);
49860+ read_lock(&tasklist_lock);
49861+ if (current->real_parent)
49862+ p = current->real_parent->role->rolename;
49863+ read_unlock(&tasklist_lock);
49864+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
49865+ p, acl_sp_role_value);
49866+ } else {
49867+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
49868+ error = -EPERM;
49869+ if(!(current->role->auth_attempts++))
49870+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49871+
49872+ goto out;
49873+ }
49874+ break;
49875+ case GR_UNSPROLE:
49876+ if (unlikely(!(gr_status & GR_READY))) {
49877+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
49878+ error = -EAGAIN;
49879+ break;
49880+ }
49881+
49882+ if (current->role->roletype & GR_ROLE_SPECIAL) {
49883+ char *p = "";
49884+ int i = 0;
49885+
49886+ read_lock(&tasklist_lock);
49887+ if (current->real_parent) {
49888+ p = current->real_parent->role->rolename;
49889+ i = current->real_parent->acl_role_id;
49890+ }
49891+ read_unlock(&tasklist_lock);
49892+
49893+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
49894+ gr_set_acls(1);
49895+ } else {
49896+ error = -EPERM;
49897+ goto out;
49898+ }
49899+ break;
49900+ default:
49901+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
49902+ error = -EINVAL;
49903+ break;
49904+ }
49905+
49906+ if (error != -EPERM)
49907+ goto out;
49908+
49909+ if(!(gr_auth_attempts++))
49910+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
49911+
49912+ out:
49913+ mutex_unlock(&gr_dev_mutex);
49914+ return error;
49915+}
49916+
49917+/* must be called with
49918+ rcu_read_lock();
49919+ read_lock(&tasklist_lock);
49920+ read_lock(&grsec_exec_file_lock);
49921+*/
49922+int gr_apply_subject_to_task(struct task_struct *task)
49923+{
49924+ struct acl_object_label *obj;
49925+ char *tmpname;
49926+ struct acl_subject_label *tmpsubj;
49927+ struct file *filp;
49928+ struct name_entry *nmatch;
49929+
49930+ filp = task->exec_file;
49931+ if (filp == NULL)
49932+ return 0;
49933+
49934+ /* the following is to apply the correct subject
49935+ on binaries running when the RBAC system
49936+ is enabled, when the binaries have been
49937+ replaced or deleted since their execution
49938+ -----
49939+ when the RBAC system starts, the inode/dev
49940+ from exec_file will be one the RBAC system
49941+ is unaware of. It only knows the inode/dev
49942+ of the present file on disk, or the absence
49943+ of it.
49944+ */
49945+ preempt_disable();
49946+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
49947+
49948+ nmatch = lookup_name_entry(tmpname);
49949+ preempt_enable();
49950+ tmpsubj = NULL;
49951+ if (nmatch) {
49952+ if (nmatch->deleted)
49953+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
49954+ else
49955+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
49956+ if (tmpsubj != NULL)
49957+ task->acl = tmpsubj;
49958+ }
49959+ if (tmpsubj == NULL)
49960+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
49961+ task->role);
49962+ if (task->acl) {
49963+ task->is_writable = 0;
49964+ /* ignore additional mmap checks for processes that are writable
49965+ by the default ACL */
49966+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49967+ if (unlikely(obj->mode & GR_WRITE))
49968+ task->is_writable = 1;
49969+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49970+ if (unlikely(obj->mode & GR_WRITE))
49971+ task->is_writable = 1;
49972+
49973+ gr_set_proc_res(task);
49974+
49975+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49976+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49977+#endif
49978+ } else {
49979+ return 1;
49980+ }
49981+
49982+ return 0;
49983+}
49984+
49985+int
49986+gr_set_acls(const int type)
49987+{
49988+ struct task_struct *task, *task2;
49989+ struct acl_role_label *role = current->role;
49990+ __u16 acl_role_id = current->acl_role_id;
49991+ const struct cred *cred;
49992+ int ret;
49993+
49994+ rcu_read_lock();
49995+ read_lock(&tasklist_lock);
49996+ read_lock(&grsec_exec_file_lock);
49997+ do_each_thread(task2, task) {
49998+ /* check to see if we're called from the exit handler,
49999+ if so, only replace ACLs that have inherited the admin
50000+ ACL */
50001+
50002+ if (type && (task->role != role ||
50003+ task->acl_role_id != acl_role_id))
50004+ continue;
50005+
50006+ task->acl_role_id = 0;
50007+ task->acl_sp_role = 0;
50008+
50009+ if (task->exec_file) {
50010+ cred = __task_cred(task);
50011+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50012+
50013+ ret = gr_apply_subject_to_task(task);
50014+ if (ret) {
50015+ read_unlock(&grsec_exec_file_lock);
50016+ read_unlock(&tasklist_lock);
50017+ rcu_read_unlock();
50018+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50019+ return ret;
50020+ }
50021+ } else {
50022+ // it's a kernel process
50023+ task->role = kernel_role;
50024+ task->acl = kernel_role->root_label;
50025+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50026+ task->acl->mode &= ~GR_PROCFIND;
50027+#endif
50028+ }
50029+ } while_each_thread(task2, task);
50030+ read_unlock(&grsec_exec_file_lock);
50031+ read_unlock(&tasklist_lock);
50032+ rcu_read_unlock();
50033+
50034+ return 0;
50035+}
50036+
50037+void
50038+gr_learn_resource(const struct task_struct *task,
50039+ const int res, const unsigned long wanted, const int gt)
50040+{
50041+ struct acl_subject_label *acl;
50042+ const struct cred *cred;
50043+
50044+ if (unlikely((gr_status & GR_READY) &&
50045+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50046+ goto skip_reslog;
50047+
50048+#ifdef CONFIG_GRKERNSEC_RESLOG
50049+ gr_log_resource(task, res, wanted, gt);
50050+#endif
50051+ skip_reslog:
50052+
50053+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50054+ return;
50055+
50056+ acl = task->acl;
50057+
50058+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50059+ !(acl->resmask & (1 << (unsigned short) res))))
50060+ return;
50061+
50062+ if (wanted >= acl->res[res].rlim_cur) {
50063+ unsigned long res_add;
50064+
50065+ res_add = wanted;
50066+ switch (res) {
50067+ case RLIMIT_CPU:
50068+ res_add += GR_RLIM_CPU_BUMP;
50069+ break;
50070+ case RLIMIT_FSIZE:
50071+ res_add += GR_RLIM_FSIZE_BUMP;
50072+ break;
50073+ case RLIMIT_DATA:
50074+ res_add += GR_RLIM_DATA_BUMP;
50075+ break;
50076+ case RLIMIT_STACK:
50077+ res_add += GR_RLIM_STACK_BUMP;
50078+ break;
50079+ case RLIMIT_CORE:
50080+ res_add += GR_RLIM_CORE_BUMP;
50081+ break;
50082+ case RLIMIT_RSS:
50083+ res_add += GR_RLIM_RSS_BUMP;
50084+ break;
50085+ case RLIMIT_NPROC:
50086+ res_add += GR_RLIM_NPROC_BUMP;
50087+ break;
50088+ case RLIMIT_NOFILE:
50089+ res_add += GR_RLIM_NOFILE_BUMP;
50090+ break;
50091+ case RLIMIT_MEMLOCK:
50092+ res_add += GR_RLIM_MEMLOCK_BUMP;
50093+ break;
50094+ case RLIMIT_AS:
50095+ res_add += GR_RLIM_AS_BUMP;
50096+ break;
50097+ case RLIMIT_LOCKS:
50098+ res_add += GR_RLIM_LOCKS_BUMP;
50099+ break;
50100+ case RLIMIT_SIGPENDING:
50101+ res_add += GR_RLIM_SIGPENDING_BUMP;
50102+ break;
50103+ case RLIMIT_MSGQUEUE:
50104+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50105+ break;
50106+ case RLIMIT_NICE:
50107+ res_add += GR_RLIM_NICE_BUMP;
50108+ break;
50109+ case RLIMIT_RTPRIO:
50110+ res_add += GR_RLIM_RTPRIO_BUMP;
50111+ break;
50112+ case RLIMIT_RTTIME:
50113+ res_add += GR_RLIM_RTTIME_BUMP;
50114+ break;
50115+ }
50116+
50117+ acl->res[res].rlim_cur = res_add;
50118+
50119+ if (wanted > acl->res[res].rlim_max)
50120+ acl->res[res].rlim_max = res_add;
50121+
50122+ /* only log the subject filename, since resource logging is supported for
50123+ single-subject learning only */
50124+ rcu_read_lock();
50125+ cred = __task_cred(task);
50126+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50127+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50128+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50129+ "", (unsigned long) res, &task->signal->saved_ip);
50130+ rcu_read_unlock();
50131+ }
50132+
50133+ return;
50134+}
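+/* Example (illustrative): with learning active, a task that needs 1500 open
+   files while its learned RLIMIT_NOFILE soft limit is lower gets the soft
+   limit raised to 1500 + GR_RLIM_NOFILE_BUMP by the switch above; the hard
+   limit is raised to the same value when 1500 also exceeds the old
+   rlim_max, and the new limits are reported through security_learn(). */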
50135+
50136+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50137+void
50138+pax_set_initial_flags(struct linux_binprm *bprm)
50139+{
50140+ struct task_struct *task = current;
50141+ struct acl_subject_label *proc;
50142+ unsigned long flags;
50143+
50144+ if (unlikely(!(gr_status & GR_READY)))
50145+ return;
50146+
50147+ flags = pax_get_flags(task);
50148+
50149+ proc = task->acl;
50150+
50151+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50152+ flags &= ~MF_PAX_PAGEEXEC;
50153+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50154+ flags &= ~MF_PAX_SEGMEXEC;
50155+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50156+ flags &= ~MF_PAX_RANDMMAP;
50157+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50158+ flags &= ~MF_PAX_EMUTRAMP;
50159+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50160+ flags &= ~MF_PAX_MPROTECT;
50161+
50162+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50163+ flags |= MF_PAX_PAGEEXEC;
50164+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50165+ flags |= MF_PAX_SEGMEXEC;
50166+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50167+ flags |= MF_PAX_RANDMMAP;
50168+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50169+ flags |= MF_PAX_EMUTRAMP;
50170+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50171+ flags |= MF_PAX_MPROTECT;
50172+
50173+ pax_set_flags(task, flags);
50174+
50175+ return;
50176+}
50177+#endif
50178+
50179+#ifdef CONFIG_SYSCTL
50180+/* Eric Biederman likes breaking userland ABI and every inode-based security
50181+ system to save 35kb of memory */
50182+
50183+/* we modify the passed in filename, but adjust it back before returning */
50184+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50185+{
50186+ struct name_entry *nmatch;
50187+ char *p, *lastp = NULL;
50188+ struct acl_object_label *obj = NULL, *tmp;
50189+ struct acl_subject_label *tmpsubj;
50190+ char c = '\0';
50191+
50192+ read_lock(&gr_inode_lock);
50193+
50194+ p = name + len - 1;
50195+ do {
50196+ nmatch = lookup_name_entry(name);
50197+ if (lastp != NULL)
50198+ *lastp = c;
50199+
50200+ if (nmatch == NULL)
50201+ goto next_component;
50202+ tmpsubj = current->acl;
50203+ do {
50204+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50205+ if (obj != NULL) {
50206+ tmp = obj->globbed;
50207+ while (tmp) {
50208+ if (!glob_match(tmp->filename, name)) {
50209+ obj = tmp;
50210+ goto found_obj;
50211+ }
50212+ tmp = tmp->next;
50213+ }
50214+ goto found_obj;
50215+ }
50216+ } while ((tmpsubj = tmpsubj->parent_subject));
50217+next_component:
50218+ /* end case */
50219+ if (p == name)
50220+ break;
50221+
50222+ while (*p != '/')
50223+ p--;
50224+ if (p == name)
50225+ lastp = p + 1;
50226+ else {
50227+ lastp = p;
50228+ p--;
50229+ }
50230+ c = *lastp;
50231+ *lastp = '\0';
50232+ } while (1);
50233+found_obj:
50234+ read_unlock(&gr_inode_lock);
50235+ /* obj returned will always be non-null */
50236+ return obj;
50237+}
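+/* Example (illustrative): for name == "/proc/sys/kernel/ostype" the loop
+   above looks up the full path first, then truncates at each '/' from the
+   right ("/proc/sys/kernel", "/proc/sys", "/proc", "/"), checking the
+   current subject and its parent subjects at every step; the object found
+   for the longest matching prefix (ultimately "/") is returned. */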
50238+
50239+/* returns 0 when allowing, non-zero on error
50240+ op of 0 is used for readdir, so we don't log the names of hidden files
50241+*/
50242+__u32
50243+gr_handle_sysctl(const struct ctl_table *table, const int op)
50244+{
50245+ ctl_table *tmp;
50246+ const char *proc_sys = "/proc/sys";
50247+ char *path;
50248+ struct acl_object_label *obj;
50249+ unsigned short len = 0, pos = 0, depth = 0, i;
50250+ __u32 err = 0;
50251+ __u32 mode = 0;
50252+
50253+ if (unlikely(!(gr_status & GR_READY)))
50254+ return 0;
50255+
50256+ /* for now, ignore operations on non-sysctl entries if it's not a
50257+	   readdir */
50258+ if (table->child != NULL && op != 0)
50259+ return 0;
50260+
50261+ mode |= GR_FIND;
50262+ /* it's only a read if it's an entry, read on dirs is for readdir */
50263+ if (op & MAY_READ)
50264+ mode |= GR_READ;
50265+ if (op & MAY_WRITE)
50266+ mode |= GR_WRITE;
50267+
50268+ preempt_disable();
50269+
50270+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50271+
50272+ /* it's only a read/write if it's an actual entry, not a dir
50273+	   (dirs are opened for readdir)
50274+ */
50275+
50276+ /* convert the requested sysctl entry into a pathname */
50277+
50278+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50279+ len += strlen(tmp->procname);
50280+ len++;
50281+ depth++;
50282+ }
50283+
50284+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50285+ /* deny */
50286+ goto out;
50287+ }
50288+
50289+ memset(path, 0, PAGE_SIZE);
50290+
50291+ memcpy(path, proc_sys, strlen(proc_sys));
50292+
50293+ pos += strlen(proc_sys);
50294+
50295+ for (; depth > 0; depth--) {
50296+ path[pos] = '/';
50297+ pos++;
50298+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50299+ if (depth == i) {
50300+ memcpy(path + pos, tmp->procname,
50301+ strlen(tmp->procname));
50302+ pos += strlen(tmp->procname);
50303+ }
50304+ i++;
50305+ }
50306+ }
50307+
50308+ obj = gr_lookup_by_name(path, pos);
50309+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50310+
50311+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50312+ ((err & mode) != mode))) {
50313+ __u32 new_mode = mode;
50314+
50315+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50316+
50317+ err = 0;
50318+ gr_log_learn_sysctl(path, new_mode);
50319+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50320+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50321+ err = -ENOENT;
50322+ } else if (!(err & GR_FIND)) {
50323+ err = -ENOENT;
50324+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50325+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50326+ path, (mode & GR_READ) ? " reading" : "",
50327+ (mode & GR_WRITE) ? " writing" : "");
50328+ err = -EACCES;
50329+ } else if ((err & mode) != mode) {
50330+ err = -EACCES;
50331+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50332+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50333+ path, (mode & GR_READ) ? " reading" : "",
50334+ (mode & GR_WRITE) ? " writing" : "");
50335+ err = 0;
50336+ } else
50337+ err = 0;
50338+
50339+ out:
50340+ preempt_enable();
50341+
50342+ return err;
50343+}
50344+#endif
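+/* Example (illustrative): for a ctl_table chain ostype -> kernel, the two
+   passes above first size the buffer (len/depth) and then assemble
+   "/proc/sys/kernel/ostype" in the per-cpu page, which gr_lookup_by_name()
+   matches against the subject's objects to yield the effective mode. */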
50345+
50346+int
50347+gr_handle_proc_ptrace(struct task_struct *task)
50348+{
50349+ struct file *filp;
50350+ struct task_struct *tmp = task;
50351+ struct task_struct *curtemp = current;
50352+ __u32 retmode;
50353+
50354+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50355+ if (unlikely(!(gr_status & GR_READY)))
50356+ return 0;
50357+#endif
50358+
50359+ read_lock(&tasklist_lock);
50360+ read_lock(&grsec_exec_file_lock);
50361+ filp = task->exec_file;
50362+
50363+ while (tmp->pid > 0) {
50364+ if (tmp == curtemp)
50365+ break;
50366+ tmp = tmp->real_parent;
50367+ }
50368+
50369+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50370+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50371+ read_unlock(&grsec_exec_file_lock);
50372+ read_unlock(&tasklist_lock);
50373+ return 1;
50374+ }
50375+
50376+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50377+ if (!(gr_status & GR_READY)) {
50378+ read_unlock(&grsec_exec_file_lock);
50379+ read_unlock(&tasklist_lock);
50380+ return 0;
50381+ }
50382+#endif
50383+
50384+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50385+ read_unlock(&grsec_exec_file_lock);
50386+ read_unlock(&tasklist_lock);
50387+
50388+ if (retmode & GR_NOPTRACE)
50389+ return 1;
50390+
50391+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50392+ && (current->acl != task->acl || (current->acl != current->role->root_label
50393+ && current->pid != task->pid)))
50394+ return 1;
50395+
50396+ return 0;
50397+}
50398+
50399+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50400+{
50401+ if (unlikely(!(gr_status & GR_READY)))
50402+ return;
50403+
50404+ if (!(current->role->roletype & GR_ROLE_GOD))
50405+ return;
50406+
50407+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50408+ p->role->rolename, gr_task_roletype_to_char(p),
50409+ p->acl->filename);
50410+}
50411+
50412+int
50413+gr_handle_ptrace(struct task_struct *task, const long request)
50414+{
50415+ struct task_struct *tmp = task;
50416+ struct task_struct *curtemp = current;
50417+ __u32 retmode;
50418+
50419+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50420+ if (unlikely(!(gr_status & GR_READY)))
50421+ return 0;
50422+#endif
50423+
50424+ read_lock(&tasklist_lock);
50425+ while (tmp->pid > 0) {
50426+ if (tmp == curtemp)
50427+ break;
50428+ tmp = tmp->real_parent;
50429+ }
50430+
50431+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50432+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50433+ read_unlock(&tasklist_lock);
50434+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50435+ return 1;
50436+ }
50437+ read_unlock(&tasklist_lock);
50438+
50439+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50440+ if (!(gr_status & GR_READY))
50441+ return 0;
50442+#endif
50443+
50444+ read_lock(&grsec_exec_file_lock);
50445+ if (unlikely(!task->exec_file)) {
50446+ read_unlock(&grsec_exec_file_lock);
50447+ return 0;
50448+ }
50449+
50450+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50451+ read_unlock(&grsec_exec_file_lock);
50452+
50453+ if (retmode & GR_NOPTRACE) {
50454+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50455+ return 1;
50456+ }
50457+
50458+ if (retmode & GR_PTRACERD) {
50459+ switch (request) {
50460+ case PTRACE_POKETEXT:
50461+ case PTRACE_POKEDATA:
50462+ case PTRACE_POKEUSR:
50463+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50464+ case PTRACE_SETREGS:
50465+ case PTRACE_SETFPREGS:
50466+#endif
50467+#ifdef CONFIG_X86
50468+ case PTRACE_SETFPXREGS:
50469+#endif
50470+#ifdef CONFIG_ALTIVEC
50471+ case PTRACE_SETVRREGS:
50472+#endif
50473+ return 1;
50474+ default:
50475+ return 0;
50476+ }
50477+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50478+ !(current->role->roletype & GR_ROLE_GOD) &&
50479+ (current->acl != task->acl)) {
50480+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50481+ return 1;
50482+ }
50483+
50484+ return 0;
50485+}
50486+
50487+static int is_writable_mmap(const struct file *filp)
50488+{
50489+ struct task_struct *task = current;
50490+ struct acl_object_label *obj, *obj2;
50491+
50492+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50493+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50494+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50495+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50496+ task->role->root_label);
50497+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50498+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50499+ return 1;
50500+ }
50501+ }
50502+ return 0;
50503+}
50504+
50505+int
50506+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50507+{
50508+ __u32 mode;
50509+
50510+ if (unlikely(!file || !(prot & PROT_EXEC)))
50511+ return 1;
50512+
50513+ if (is_writable_mmap(file))
50514+ return 0;
50515+
50516+ mode =
50517+ gr_search_file(file->f_path.dentry,
50518+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50519+ file->f_path.mnt);
50520+
50521+ if (!gr_tpe_allow(file))
50522+ return 0;
50523+
50524+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50525+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50526+ return 0;
50527+ } else if (unlikely(!(mode & GR_EXEC))) {
50528+ return 0;
50529+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50530+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50531+ return 1;
50532+ }
50533+
50534+ return 1;
50535+}
50536+
50537+int
50538+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50539+{
50540+ __u32 mode;
50541+
50542+ if (unlikely(!file || !(prot & PROT_EXEC)))
50543+ return 1;
50544+
50545+ if (is_writable_mmap(file))
50546+ return 0;
50547+
50548+ mode =
50549+ gr_search_file(file->f_path.dentry,
50550+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50551+ file->f_path.mnt);
50552+
50553+ if (!gr_tpe_allow(file))
50554+ return 0;
50555+
50556+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50557+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50558+ return 0;
50559+ } else if (unlikely(!(mode & GR_EXEC))) {
50560+ return 0;
50561+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50562+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50563+ return 1;
50564+ }
50565+
50566+ return 1;
50567+}
50568+
50569+void
50570+gr_acl_handle_psacct(struct task_struct *task, const long code)
50571+{
50572+ unsigned long runtime;
50573+ unsigned long cputime;
50574+ unsigned int wday, cday;
50575+ __u8 whr, chr;
50576+ __u8 wmin, cmin;
50577+ __u8 wsec, csec;
50578+ struct timespec timeval;
50579+
50580+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
50581+ !(task->acl->mode & GR_PROCACCT)))
50582+ return;
50583+
50584+ do_posix_clock_monotonic_gettime(&timeval);
50585+ runtime = timeval.tv_sec - task->start_time.tv_sec;
50586+ wday = runtime / (3600 * 24);
50587+ runtime -= wday * (3600 * 24);
50588+ whr = runtime / 3600;
50589+ runtime -= whr * 3600;
50590+ wmin = runtime / 60;
50591+ runtime -= wmin * 60;
50592+ wsec = runtime;
50593+
50594+ cputime = (task->utime + task->stime) / HZ;
50595+ cday = cputime / (3600 * 24);
50596+ cputime -= cday * (3600 * 24);
50597+ chr = cputime / 3600;
50598+ cputime -= chr * 3600;
50599+ cmin = cputime / 60;
50600+ cputime -= cmin * 60;
50601+ csec = cputime;
50602+
50603+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
50604+
50605+ return;
50606+}
50607+
50608+void gr_set_kernel_label(struct task_struct *task)
50609+{
50610+ if (gr_status & GR_READY) {
50611+ task->role = kernel_role;
50612+ task->acl = kernel_role->root_label;
50613+ }
50614+ return;
50615+}
50616+
50617+#ifdef CONFIG_TASKSTATS
50618+int gr_is_taskstats_denied(int pid)
50619+{
50620+ struct task_struct *task;
50621+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50622+ const struct cred *cred;
50623+#endif
50624+ int ret = 0;
50625+
50626+ /* restrict taskstats viewing to un-chrooted root users
50627+ who have the 'view' subject flag if the RBAC system is enabled
50628+ */
50629+
50630+ rcu_read_lock();
50631+ read_lock(&tasklist_lock);
50632+ task = find_task_by_vpid(pid);
50633+ if (task) {
50634+#ifdef CONFIG_GRKERNSEC_CHROOT
50635+ if (proc_is_chrooted(task))
50636+ ret = -EACCES;
50637+#endif
50638+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50639+ cred = __task_cred(task);
50640+#ifdef CONFIG_GRKERNSEC_PROC_USER
50641+ if (cred->uid != 0)
50642+ ret = -EACCES;
50643+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50644+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
50645+ ret = -EACCES;
50646+#endif
50647+#endif
50648+ if (gr_status & GR_READY) {
50649+ if (!(task->acl->mode & GR_VIEW))
50650+ ret = -EACCES;
50651+ }
50652+ } else
50653+ ret = -ENOENT;
50654+
50655+ read_unlock(&tasklist_lock);
50656+ rcu_read_unlock();
50657+
50658+ return ret;
50659+}
50660+#endif
50661+
50662+/* AUXV entries are filled via a descendant of search_binary_handler
50663+ after we've already applied the subject for the target
50664+*/
50665+int gr_acl_enable_at_secure(void)
50666+{
50667+ if (unlikely(!(gr_status & GR_READY)))
50668+ return 0;
50669+
50670+ if (current->acl->mode & GR_ATSECURE)
50671+ return 1;
50672+
50673+ return 0;
50674+}
50675+
50676+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
50677+{
50678+ struct task_struct *task = current;
50679+ struct dentry *dentry = file->f_path.dentry;
50680+ struct vfsmount *mnt = file->f_path.mnt;
50681+ struct acl_object_label *obj, *tmp;
50682+ struct acl_subject_label *subj;
50683+ unsigned int bufsize;
50684+ int is_not_root;
50685+ char *path;
50686+ dev_t dev = __get_dev(dentry);
50687+
50688+ if (unlikely(!(gr_status & GR_READY)))
50689+ return 1;
50690+
50691+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50692+ return 1;
50693+
50694+ /* ignore Eric Biederman */
50695+ if (IS_PRIVATE(dentry->d_inode))
50696+ return 1;
50697+
50698+ subj = task->acl;
50699+ do {
50700+ obj = lookup_acl_obj_label(ino, dev, subj);
50701+ if (obj != NULL)
50702+ return (obj->mode & GR_FIND) ? 1 : 0;
50703+ } while ((subj = subj->parent_subject));
50704+
50705+ /* this is purely an optimization since we're looking for an object
50706+ for the directory we're doing a readdir on
50707+ if it's possible for any globbed object to match the entry we're
50708+ filling into the directory, then the object we find here will be
50709+ an anchor point with attached globbed objects
50710+ */
50711+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
50712+ if (obj->globbed == NULL)
50713+ return (obj->mode & GR_FIND) ? 1 : 0;
50714+
50715+ is_not_root = ((obj->filename[0] == '/') &&
50716+ (obj->filename[1] == '\0')) ? 0 : 1;
50717+ bufsize = PAGE_SIZE - namelen - is_not_root;
50718+
50719+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
50720+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
50721+ return 1;
50722+
50723+ preempt_disable();
50724+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50725+ bufsize);
50726+
50727+ bufsize = strlen(path);
50728+
50729+ /* if base is "/", don't append an additional slash */
50730+ if (is_not_root)
50731+ *(path + bufsize) = '/';
50732+ memcpy(path + bufsize + is_not_root, name, namelen);
50733+ *(path + bufsize + namelen + is_not_root) = '\0';
50734+
50735+ tmp = obj->globbed;
50736+ while (tmp) {
50737+ if (!glob_match(tmp->filename, path)) {
50738+ preempt_enable();
50739+ return (tmp->mode & GR_FIND) ? 1 : 0;
50740+ }
50741+ tmp = tmp->next;
50742+ }
50743+ preempt_enable();
50744+ return (obj->mode & GR_FIND) ? 1 : 0;
50745+}
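+/* Example (illustrative): while filling "resolv.conf" during a readdir of
+   "/etc", the anchor object found above has its path rebuilt in the
+   per-cpu page, "resolv.conf" is appended to form "/etc/resolv.conf", and
+   that string is tested against each globbed object (e.g. "/etc/*.conf")
+   before falling back to the anchor object's GR_FIND bit. */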
50746+
50747+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
50748+EXPORT_SYMBOL(gr_acl_is_enabled);
50749+#endif
50750+EXPORT_SYMBOL(gr_learn_resource);
50751+EXPORT_SYMBOL(gr_set_kernel_label);
50752+#ifdef CONFIG_SECURITY
50753+EXPORT_SYMBOL(gr_check_user_change);
50754+EXPORT_SYMBOL(gr_check_group_change);
50755+#endif
50756+
50757diff -urNp linux-2.6.32.46/grsecurity/gracl_cap.c linux-2.6.32.46/grsecurity/gracl_cap.c
50758--- linux-2.6.32.46/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
50759+++ linux-2.6.32.46/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
50760@@ -0,0 +1,138 @@
50761+#include <linux/kernel.h>
50762+#include <linux/module.h>
50763+#include <linux/sched.h>
50764+#include <linux/gracl.h>
50765+#include <linux/grsecurity.h>
50766+#include <linux/grinternal.h>
50767+
50768+static const char *captab_log[] = {
50769+ "CAP_CHOWN",
50770+ "CAP_DAC_OVERRIDE",
50771+ "CAP_DAC_READ_SEARCH",
50772+ "CAP_FOWNER",
50773+ "CAP_FSETID",
50774+ "CAP_KILL",
50775+ "CAP_SETGID",
50776+ "CAP_SETUID",
50777+ "CAP_SETPCAP",
50778+ "CAP_LINUX_IMMUTABLE",
50779+ "CAP_NET_BIND_SERVICE",
50780+ "CAP_NET_BROADCAST",
50781+ "CAP_NET_ADMIN",
50782+ "CAP_NET_RAW",
50783+ "CAP_IPC_LOCK",
50784+ "CAP_IPC_OWNER",
50785+ "CAP_SYS_MODULE",
50786+ "CAP_SYS_RAWIO",
50787+ "CAP_SYS_CHROOT",
50788+ "CAP_SYS_PTRACE",
50789+ "CAP_SYS_PACCT",
50790+ "CAP_SYS_ADMIN",
50791+ "CAP_SYS_BOOT",
50792+ "CAP_SYS_NICE",
50793+ "CAP_SYS_RESOURCE",
50794+ "CAP_SYS_TIME",
50795+ "CAP_SYS_TTY_CONFIG",
50796+ "CAP_MKNOD",
50797+ "CAP_LEASE",
50798+ "CAP_AUDIT_WRITE",
50799+ "CAP_AUDIT_CONTROL",
50800+ "CAP_SETFCAP",
50801+ "CAP_MAC_OVERRIDE",
50802+ "CAP_MAC_ADMIN"
50803+};
50804+
50805+EXPORT_SYMBOL(gr_is_capable);
50806+EXPORT_SYMBOL(gr_is_capable_nolog);
50807+
50808+int
50809+gr_is_capable(const int cap)
50810+{
50811+ struct task_struct *task = current;
50812+ const struct cred *cred = current_cred();
50813+ struct acl_subject_label *curracl;
50814+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50815+ kernel_cap_t cap_audit = __cap_empty_set;
50816+
50817+ if (!gr_acl_is_enabled())
50818+ return 1;
50819+
50820+ curracl = task->acl;
50821+
50822+ cap_drop = curracl->cap_lower;
50823+ cap_mask = curracl->cap_mask;
50824+ cap_audit = curracl->cap_invert_audit;
50825+
50826+ while ((curracl = curracl->parent_subject)) {
50827+ /* if the cap isn't specified in the current computed mask but is specified in the
50828+ current level subject, and is lowered in the current level subject, then add
50829+ it to the set of dropped capabilities
50830+ otherwise, add the current level subject's mask to the current computed mask
50831+ */
50832+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50833+ cap_raise(cap_mask, cap);
50834+ if (cap_raised(curracl->cap_lower, cap))
50835+ cap_raise(cap_drop, cap);
50836+ if (cap_raised(curracl->cap_invert_audit, cap))
50837+ cap_raise(cap_audit, cap);
50838+ }
50839+ }
50840+
50841+ if (!cap_raised(cap_drop, cap)) {
50842+ if (cap_raised(cap_audit, cap))
50843+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
50844+ return 1;
50845+ }
50846+
50847+ curracl = task->acl;
50848+
50849+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
50850+ && cap_raised(cred->cap_effective, cap)) {
50851+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50852+ task->role->roletype, cred->uid,
50853+ cred->gid, task->exec_file ?
50854+ gr_to_filename(task->exec_file->f_path.dentry,
50855+ task->exec_file->f_path.mnt) : curracl->filename,
50856+ curracl->filename, 0UL,
50857+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
50858+ return 1;
50859+ }
50860+
50861+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
50862+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
50863+ return 0;
50864+}
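+/* Example (illustrative): the walk above gives the nearest subject whose
+   cap_mask covers a capability the final say.  If the current subject's
+   cap_mask does not cover CAP_NET_RAW but a parent subject's does, that
+   parent's cap_lower decides whether the capability is dropped and its
+   cap_invert_audit whether a successful use is logged; a capability not
+   covered by any mask in the chain never enters cap_drop and passes the
+   RBAC check. */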
50865+
50866+int
50867+gr_is_capable_nolog(const int cap)
50868+{
50869+ struct acl_subject_label *curracl;
50870+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
50871+
50872+ if (!gr_acl_is_enabled())
50873+ return 1;
50874+
50875+ curracl = current->acl;
50876+
50877+ cap_drop = curracl->cap_lower;
50878+ cap_mask = curracl->cap_mask;
50879+
50880+ while ((curracl = curracl->parent_subject)) {
50881+ /* if the cap isn't specified in the current computed mask but is specified in the
50882+ current level subject, and is lowered in the current level subject, then add
50883+ it to the set of dropped capabilities
50884+ otherwise, add the current level subject's mask to the current computed mask
50885+ */
50886+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
50887+ cap_raise(cap_mask, cap);
50888+ if (cap_raised(curracl->cap_lower, cap))
50889+ cap_raise(cap_drop, cap);
50890+ }
50891+ }
50892+
50893+ if (!cap_raised(cap_drop, cap))
50894+ return 1;
50895+
50896+ return 0;
50897+}
50898+
50899diff -urNp linux-2.6.32.46/grsecurity/gracl_fs.c linux-2.6.32.46/grsecurity/gracl_fs.c
50900--- linux-2.6.32.46/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
50901+++ linux-2.6.32.46/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
50902@@ -0,0 +1,431 @@
50903+#include <linux/kernel.h>
50904+#include <linux/sched.h>
50905+#include <linux/types.h>
50906+#include <linux/fs.h>
50907+#include <linux/file.h>
50908+#include <linux/stat.h>
50909+#include <linux/grsecurity.h>
50910+#include <linux/grinternal.h>
50911+#include <linux/gracl.h>
50912+
50913+__u32
50914+gr_acl_handle_hidden_file(const struct dentry * dentry,
50915+ const struct vfsmount * mnt)
50916+{
50917+ __u32 mode;
50918+
50919+ if (unlikely(!dentry->d_inode))
50920+ return GR_FIND;
50921+
50922+ mode =
50923+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
50924+
50925+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
50926+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50927+ return mode;
50928+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
50929+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
50930+ return 0;
50931+ } else if (unlikely(!(mode & GR_FIND)))
50932+ return 0;
50933+
50934+ return GR_FIND;
50935+}
50936+
50937+__u32
50938+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50939+ const int fmode)
50940+{
50941+ __u32 reqmode = GR_FIND;
50942+ __u32 mode;
50943+
50944+ if (unlikely(!dentry->d_inode))
50945+ return reqmode;
50946+
50947+ if (unlikely(fmode & O_APPEND))
50948+ reqmode |= GR_APPEND;
50949+ else if (unlikely(fmode & FMODE_WRITE))
50950+ reqmode |= GR_WRITE;
50951+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50952+ reqmode |= GR_READ;
50953+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
50954+ reqmode &= ~GR_READ;
50955+ mode =
50956+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
50957+ mnt);
50958+
50959+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
50960+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50961+ reqmode & GR_READ ? " reading" : "",
50962+ reqmode & GR_WRITE ? " writing" : reqmode &
50963+ GR_APPEND ? " appending" : "");
50964+ return reqmode;
50965+ } else
50966+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
50967+ {
50968+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
50969+ reqmode & GR_READ ? " reading" : "",
50970+ reqmode & GR_WRITE ? " writing" : reqmode &
50971+ GR_APPEND ? " appending" : "");
50972+ return 0;
50973+ } else if (unlikely((mode & reqmode) != reqmode))
50974+ return 0;
50975+
50976+ return reqmode;
50977+}
50978+
50979+__u32
50980+gr_acl_handle_creat(const struct dentry * dentry,
50981+ const struct dentry * p_dentry,
50982+ const struct vfsmount * p_mnt, const int fmode,
50983+ const int imode)
50984+{
50985+ __u32 reqmode = GR_WRITE | GR_CREATE;
50986+ __u32 mode;
50987+
50988+ if (unlikely(fmode & O_APPEND))
50989+ reqmode |= GR_APPEND;
50990+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
50991+ reqmode |= GR_READ;
50992+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
50993+ reqmode |= GR_SETID;
50994+
50995+ mode =
50996+ gr_check_create(dentry, p_dentry, p_mnt,
50997+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
50998+
50999+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51000+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51001+ reqmode & GR_READ ? " reading" : "",
51002+ reqmode & GR_WRITE ? " writing" : reqmode &
51003+ GR_APPEND ? " appending" : "");
51004+ return reqmode;
51005+ } else
51006+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51007+ {
51008+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51009+ reqmode & GR_READ ? " reading" : "",
51010+ reqmode & GR_WRITE ? " writing" : reqmode &
51011+ GR_APPEND ? " appending" : "");
51012+ return 0;
51013+ } else if (unlikely((mode & reqmode) != reqmode))
51014+ return 0;
51015+
51016+ return reqmode;
51017+}
51018+
51019+__u32
51020+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51021+ const int fmode)
51022+{
51023+ __u32 mode, reqmode = GR_FIND;
51024+
51025+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51026+ reqmode |= GR_EXEC;
51027+ if (fmode & S_IWOTH)
51028+ reqmode |= GR_WRITE;
51029+ if (fmode & S_IROTH)
51030+ reqmode |= GR_READ;
51031+
51032+ mode =
51033+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51034+ mnt);
51035+
51036+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51037+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51038+ reqmode & GR_READ ? " reading" : "",
51039+ reqmode & GR_WRITE ? " writing" : "",
51040+ reqmode & GR_EXEC ? " executing" : "");
51041+ return reqmode;
51042+ } else
51043+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51044+ {
51045+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51046+ reqmode & GR_READ ? " reading" : "",
51047+ reqmode & GR_WRITE ? " writing" : "",
51048+ reqmode & GR_EXEC ? " executing" : "");
51049+ return 0;
51050+ } else if (unlikely((mode & reqmode) != reqmode))
51051+ return 0;
51052+
51053+ return reqmode;
51054+}
51055+
51056+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51057+{
51058+ __u32 mode;
51059+
51060+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51061+
51062+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51063+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51064+ return mode;
51065+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51066+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51067+ return 0;
51068+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51069+ return 0;
51070+
51071+ return (reqmode);
51072+}
51073+
51074+__u32
51075+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51076+{
51077+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51078+}
51079+
51080+__u32
51081+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51082+{
51083+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51084+}
51085+
51086+__u32
51087+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51088+{
51089+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51090+}
51091+
51092+__u32
51093+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51094+{
51095+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51096+}
51097+
51098+__u32
51099+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51100+ mode_t mode)
51101+{
51102+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51103+ return 1;
51104+
51105+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51106+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51107+ GR_FCHMOD_ACL_MSG);
51108+ } else {
51109+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51110+ }
51111+}
51112+
51113+__u32
51114+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51115+ mode_t mode)
51116+{
51117+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51118+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51119+ GR_CHMOD_ACL_MSG);
51120+ } else {
51121+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51122+ }
51123+}
51124+
51125+__u32
51126+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51127+{
51128+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51129+}
51130+
51131+__u32
51132+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51133+{
51134+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51135+}
51136+
51137+__u32
51138+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51139+{
51140+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51141+}
51142+
51143+__u32
51144+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51145+{
51146+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51147+ GR_UNIXCONNECT_ACL_MSG);
51148+}
51149+
51150+/* hardlinks require at minimum create permission,
51151+ any additional privilege required is based on the
51152+ privilege of the file being linked to
51153+*/
51154+__u32
51155+gr_acl_handle_link(const struct dentry * new_dentry,
51156+ const struct dentry * parent_dentry,
51157+ const struct vfsmount * parent_mnt,
51158+ const struct dentry * old_dentry,
51159+ const struct vfsmount * old_mnt, const char *to)
51160+{
51161+ __u32 mode;
51162+ __u32 needmode = GR_CREATE | GR_LINK;
51163+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51164+
51165+ mode =
51166+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51167+ old_mnt);
51168+
51169+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51170+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51171+ return mode;
51172+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51173+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51174+ return 0;
51175+ } else if (unlikely((mode & needmode) != needmode))
51176+ return 0;
51177+
51178+ return 1;
51179+}
51180+
51181+__u32
51182+gr_acl_handle_symlink(const struct dentry * new_dentry,
51183+ const struct dentry * parent_dentry,
51184+ const struct vfsmount * parent_mnt, const char *from)
51185+{
51186+ __u32 needmode = GR_WRITE | GR_CREATE;
51187+ __u32 mode;
51188+
51189+ mode =
51190+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51191+ GR_CREATE | GR_AUDIT_CREATE |
51192+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51193+
51194+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51195+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51196+ return mode;
51197+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51198+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51199+ return 0;
51200+ } else if (unlikely((mode & needmode) != needmode))
51201+ return 0;
51202+
51203+ return (GR_WRITE | GR_CREATE);
51204+}
51205+
51206+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51207+{
51208+ __u32 mode;
51209+
51210+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51211+
51212+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51213+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51214+ return mode;
51215+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51216+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51217+ return 0;
51218+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51219+ return 0;
51220+
51221+ return (reqmode);
51222+}
51223+
51224+__u32
51225+gr_acl_handle_mknod(const struct dentry * new_dentry,
51226+ const struct dentry * parent_dentry,
51227+ const struct vfsmount * parent_mnt,
51228+ const int mode)
51229+{
51230+ __u32 reqmode = GR_WRITE | GR_CREATE;
51231+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51232+ reqmode |= GR_SETID;
51233+
51234+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51235+ reqmode, GR_MKNOD_ACL_MSG);
51236+}
51237+
51238+__u32
51239+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51240+ const struct dentry *parent_dentry,
51241+ const struct vfsmount *parent_mnt)
51242+{
51243+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51244+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51245+}
51246+
51247+#define RENAME_CHECK_SUCCESS(old, new) \
51248+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51249+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51250+
51251+int
51252+gr_acl_handle_rename(struct dentry *new_dentry,
51253+ struct dentry *parent_dentry,
51254+ const struct vfsmount *parent_mnt,
51255+ struct dentry *old_dentry,
51256+ struct inode *old_parent_inode,
51257+ struct vfsmount *old_mnt, const char *newname)
51258+{
51259+ __u32 comp1, comp2;
51260+ int error = 0;
51261+
51262+ if (unlikely(!gr_acl_is_enabled()))
51263+ return 0;
51264+
51265+ if (!new_dentry->d_inode) {
51266+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51267+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51268+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51269+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51270+ GR_DELETE | GR_AUDIT_DELETE |
51271+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51272+ GR_SUPPRESS, old_mnt);
51273+ } else {
51274+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51275+ GR_CREATE | GR_DELETE |
51276+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51277+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51278+ GR_SUPPRESS, parent_mnt);
51279+ comp2 =
51280+ gr_search_file(old_dentry,
51281+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51282+ GR_DELETE | GR_AUDIT_DELETE |
51283+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51284+ }
51285+
51286+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51287+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51288+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51289+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51290+ && !(comp2 & GR_SUPPRESS)) {
51291+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51292+ error = -EACCES;
51293+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51294+ error = -EACCES;
51295+
51296+ return error;
51297+}
51298+
51299+void
51300+gr_acl_handle_exit(void)
51301+{
51302+ u16 id;
51303+ char *rolename;
51304+ struct file *exec_file;
51305+
51306+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51307+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51308+ id = current->acl_role_id;
51309+ rolename = current->role->rolename;
51310+ gr_set_acls(1);
51311+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51312+ }
51313+
51314+ write_lock(&grsec_exec_file_lock);
51315+ exec_file = current->exec_file;
51316+ current->exec_file = NULL;
51317+ write_unlock(&grsec_exec_file_lock);
51318+
51319+ if (exec_file)
51320+ fput(exec_file);
51321+}
51322+
51323+int
51324+gr_acl_handle_procpidmem(const struct task_struct *task)
51325+{
51326+ if (unlikely(!gr_acl_is_enabled()))
51327+ return 0;
51328+
51329+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51330+ return -EACCES;
51331+
51332+ return 0;
51333+}
51334diff -urNp linux-2.6.32.46/grsecurity/gracl_ip.c linux-2.6.32.46/grsecurity/gracl_ip.c
51335--- linux-2.6.32.46/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51336+++ linux-2.6.32.46/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51337@@ -0,0 +1,382 @@
51338+#include <linux/kernel.h>
51339+#include <asm/uaccess.h>
51340+#include <asm/errno.h>
51341+#include <net/sock.h>
51342+#include <linux/file.h>
51343+#include <linux/fs.h>
51344+#include <linux/net.h>
51345+#include <linux/in.h>
51346+#include <linux/skbuff.h>
51347+#include <linux/ip.h>
51348+#include <linux/udp.h>
51349+#include <linux/smp_lock.h>
51350+#include <linux/types.h>
51351+#include <linux/sched.h>
51352+#include <linux/netdevice.h>
51353+#include <linux/inetdevice.h>
51354+#include <linux/gracl.h>
51355+#include <linux/grsecurity.h>
51356+#include <linux/grinternal.h>
51357+
51358+#define GR_BIND 0x01
51359+#define GR_CONNECT 0x02
51360+#define GR_INVERT 0x04
51361+#define GR_BINDOVERRIDE 0x08
51362+#define GR_CONNECTOVERRIDE 0x10
51363+#define GR_SOCK_FAMILY 0x20
51364+
51365+static const char * gr_protocols[IPPROTO_MAX] = {
51366+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51367+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51368+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51369+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51370+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51371+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51372+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51373+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51374+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51375+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51376+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51377+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51378+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51379+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51380+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51381+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51382+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51383+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51384+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51385+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51386+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51387+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51388+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51389+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51390+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51391+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51392+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51393+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51394+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51395+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51396+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51397+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51398+ };
51399+
51400+static const char * gr_socktypes[SOCK_MAX] = {
51401+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51402+ "unknown:7", "unknown:8", "unknown:9", "packet"
51403+ };
51404+
51405+static const char * gr_sockfamilies[AF_MAX+1] = {
51406+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51407+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51408+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51409+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51410+ };
51411+
51412+const char *
51413+gr_proto_to_name(unsigned char proto)
51414+{
51415+ return gr_protocols[proto];
51416+}
51417+
51418+const char *
51419+gr_socktype_to_name(unsigned char type)
51420+{
51421+ return gr_socktypes[type];
51422+}
51423+
51424+const char *
51425+gr_sockfamily_to_name(unsigned char family)
51426+{
51427+ return gr_sockfamilies[family];
51428+}
51429+
51430+int
51431+gr_search_socket(const int domain, const int type, const int protocol)
51432+{
51433+ struct acl_subject_label *curr;
51434+ const struct cred *cred = current_cred();
51435+
51436+ if (unlikely(!gr_acl_is_enabled()))
51437+ goto exit;
51438+
51439+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51440+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51441+ goto exit; // let the kernel handle it
51442+
51443+ curr = current->acl;
51444+
51445+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51446+	/* the family is allowed; if this is PF_INET, allow it only if
51447+ the extra sock type/protocol checks pass */
51448+ if (domain == PF_INET)
51449+ goto inet_check;
51450+ goto exit;
51451+ } else {
51452+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51453+ __u32 fakeip = 0;
51454+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51455+ current->role->roletype, cred->uid,
51456+ cred->gid, current->exec_file ?
51457+ gr_to_filename(current->exec_file->f_path.dentry,
51458+ current->exec_file->f_path.mnt) :
51459+ curr->filename, curr->filename,
51460+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51461+ &current->signal->saved_ip);
51462+ goto exit;
51463+ }
51464+ goto exit_fail;
51465+ }
51466+
51467+inet_check:
51468+ /* the rest of this checking is for IPv4 only */
51469+ if (!curr->ips)
51470+ goto exit;
51471+
51472+ if ((curr->ip_type & (1 << type)) &&
51473+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51474+ goto exit;
51475+
51476+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51477+		/* we don't place acls on raw sockets, and sometimes
51478+ dgram/ip sockets are opened for ioctl and not
51479+ bind/connect, so we'll fake a bind learn log */
51480+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51481+ __u32 fakeip = 0;
51482+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51483+ current->role->roletype, cred->uid,
51484+ cred->gid, current->exec_file ?
51485+ gr_to_filename(current->exec_file->f_path.dentry,
51486+ current->exec_file->f_path.mnt) :
51487+ curr->filename, curr->filename,
51488+ &fakeip, 0, type,
51489+ protocol, GR_CONNECT, &current->signal->saved_ip);
51490+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51491+ __u32 fakeip = 0;
51492+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51493+ current->role->roletype, cred->uid,
51494+ cred->gid, current->exec_file ?
51495+ gr_to_filename(current->exec_file->f_path.dentry,
51496+ current->exec_file->f_path.mnt) :
51497+ curr->filename, curr->filename,
51498+ &fakeip, 0, type,
51499+ protocol, GR_BIND, &current->signal->saved_ip);
51500+ }
51501+ /* we'll log when they use connect or bind */
51502+ goto exit;
51503+ }
51504+
51505+exit_fail:
51506+ if (domain == PF_INET)
51507+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51508+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51509+ else
51510+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51511+ gr_socktype_to_name(type), protocol);
51512+
51513+ return 0;
51514+exit:
51515+ return 1;
51516+}
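+/* Example (illustrative): sock_families is a bitmap indexed by address
+   family, so PF_INET6 (10) is tested as sock_families[0] & (1 << 10);
+   only PF_INET goes on to the per-subject ip_type and ip_proto bitmap
+   checks before the socket call is allowed. */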
51517+
51518+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51519+{
51520+ if ((ip->mode & mode) &&
51521+ (ip_port >= ip->low) &&
51522+ (ip_port <= ip->high) &&
51523+ ((ntohl(ip_addr) & our_netmask) ==
51524+ (ntohl(our_addr) & our_netmask))
51525+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51526+ && (ip->type & (1 << type))) {
51527+ if (ip->mode & GR_INVERT)
51528+ return 2; // specifically denied
51529+ else
51530+ return 1; // allowed
51531+ }
51532+
51533+ return 0; // not specifically allowed, may continue parsing
51534+}
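+/* Example (illustrative): a rule for 192.168.1.0/24 matches a connect to
+   192.168.1.42 because masking both the peer address and the rule address
+   with the rule's netmask yields the same network; the port range, the
+   protocol bitmap and the socket-type bit must match as well, and a rule
+   carrying GR_INVERT turns such a match into an explicit deny. */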
51535+
51536+static int
51537+gr_search_connectbind(const int full_mode, struct sock *sk,
51538+ struct sockaddr_in *addr, const int type)
51539+{
51540+ char iface[IFNAMSIZ] = {0};
51541+ struct acl_subject_label *curr;
51542+ struct acl_ip_label *ip;
51543+ struct inet_sock *isk;
51544+ struct net_device *dev;
51545+ struct in_device *idev;
51546+ unsigned long i;
51547+ int ret;
51548+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51549+ __u32 ip_addr = 0;
51550+ __u32 our_addr;
51551+ __u32 our_netmask;
51552+ char *p;
51553+ __u16 ip_port = 0;
51554+ const struct cred *cred = current_cred();
51555+
51556+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
51557+ return 0;
51558+
51559+ curr = current->acl;
51560+ isk = inet_sk(sk);
51561+
51562+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
51563+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
51564+ addr->sin_addr.s_addr = curr->inaddr_any_override;
51565+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
51566+ struct sockaddr_in saddr;
51567+ int err;
51568+
51569+ saddr.sin_family = AF_INET;
51570+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
51571+ saddr.sin_port = isk->sport;
51572+
51573+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51574+ if (err)
51575+ return err;
51576+
51577+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
51578+ if (err)
51579+ return err;
51580+ }
51581+
51582+ if (!curr->ips)
51583+ return 0;
51584+
51585+ ip_addr = addr->sin_addr.s_addr;
51586+ ip_port = ntohs(addr->sin_port);
51587+
51588+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51589+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51590+ current->role->roletype, cred->uid,
51591+ cred->gid, current->exec_file ?
51592+ gr_to_filename(current->exec_file->f_path.dentry,
51593+ current->exec_file->f_path.mnt) :
51594+ curr->filename, curr->filename,
51595+ &ip_addr, ip_port, type,
51596+ sk->sk_protocol, mode, &current->signal->saved_ip);
51597+ return 0;
51598+ }
51599+
51600+ for (i = 0; i < curr->ip_num; i++) {
51601+ ip = *(curr->ips + i);
51602+ if (ip->iface != NULL) {
51603+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
51604+ p = strchr(iface, ':');
51605+ if (p != NULL)
51606+ *p = '\0';
51607+ dev = dev_get_by_name(sock_net(sk), iface);
51608+ if (dev == NULL)
51609+ continue;
51610+ idev = in_dev_get(dev);
51611+ if (idev == NULL) {
51612+ dev_put(dev);
51613+ continue;
51614+ }
51615+ rcu_read_lock();
51616+ for_ifa(idev) {
51617+ if (!strcmp(ip->iface, ifa->ifa_label)) {
51618+ our_addr = ifa->ifa_address;
51619+ our_netmask = 0xffffffff;
51620+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51621+ if (ret == 1) {
51622+ rcu_read_unlock();
51623+ in_dev_put(idev);
51624+ dev_put(dev);
51625+ return 0;
51626+ } else if (ret == 2) {
51627+ rcu_read_unlock();
51628+ in_dev_put(idev);
51629+ dev_put(dev);
51630+ goto denied;
51631+ }
51632+ }
51633+ } endfor_ifa(idev);
51634+ rcu_read_unlock();
51635+ in_dev_put(idev);
51636+ dev_put(dev);
51637+ } else {
51638+ our_addr = ip->addr;
51639+ our_netmask = ip->netmask;
51640+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
51641+ if (ret == 1)
51642+ return 0;
51643+ else if (ret == 2)
51644+ goto denied;
51645+ }
51646+ }
51647+
51648+denied:
51649+ if (mode == GR_BIND)
51650+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51651+ else if (mode == GR_CONNECT)
51652+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
51653+
51654+ return -EACCES;
51655+}
51656+
51657+int
51658+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
51659+{
51660+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
51661+}
51662+
51663+int
51664+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
51665+{
51666+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
51667+}
51668+
51669+int gr_search_listen(struct socket *sock)
51670+{
51671+ struct sock *sk = sock->sk;
51672+ struct sockaddr_in addr;
51673+
51674+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51675+ addr.sin_port = inet_sk(sk)->sport;
51676+
51677+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51678+}
51679+
51680+int gr_search_accept(struct socket *sock)
51681+{
51682+ struct sock *sk = sock->sk;
51683+ struct sockaddr_in addr;
51684+
51685+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
51686+ addr.sin_port = inet_sk(sk)->sport;
51687+
51688+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
51689+}
51690+
51691+int
51692+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
51693+{
51694+ if (addr)
51695+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
51696+ else {
51697+ struct sockaddr_in sin;
51698+ const struct inet_sock *inet = inet_sk(sk);
51699+
51700+ sin.sin_addr.s_addr = inet->daddr;
51701+ sin.sin_port = inet->dport;
51702+
51703+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51704+ }
51705+}
51706+
51707+int
51708+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
51709+{
51710+ struct sockaddr_in sin;
51711+
51712+ if (unlikely(skb->len < sizeof (struct udphdr)))
51713+ return 0; // skip this packet
51714+
51715+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
51716+ sin.sin_port = udp_hdr(skb)->source;
51717+
51718+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
51719+}
51720diff -urNp linux-2.6.32.46/grsecurity/gracl_learn.c linux-2.6.32.46/grsecurity/gracl_learn.c
51721--- linux-2.6.32.46/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
51722+++ linux-2.6.32.46/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
51723@@ -0,0 +1,208 @@
51724+#include <linux/kernel.h>
51725+#include <linux/mm.h>
51726+#include <linux/sched.h>
51727+#include <linux/poll.h>
51728+#include <linux/smp_lock.h>
51729+#include <linux/string.h>
51730+#include <linux/file.h>
51731+#include <linux/types.h>
51732+#include <linux/vmalloc.h>
51733+#include <linux/grinternal.h>
51734+
51735+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
51736+ size_t count, loff_t *ppos);
51737+extern int gr_acl_is_enabled(void);
51738+
51739+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
51740+static int gr_learn_attached;
51741+
51742+/* use a 512k buffer */
51743+#define LEARN_BUFFER_SIZE (512 * 1024)
51744+
51745+static DEFINE_SPINLOCK(gr_learn_lock);
51746+static DEFINE_MUTEX(gr_learn_user_mutex);
51747+
51748+/* we need to maintain two buffers: the kernel context of grlearn uses a
51749+   mutex around the userspace copying, while the other kernel contexts use
51750+   a spinlock when copying into the buffer, since they cannot sleep
51751+*/
51752+static char *learn_buffer;
51753+static char *learn_buffer_user;
51754+static int learn_buffer_len;
51755+static int learn_buffer_user_len;
51756+
51757+static ssize_t
51758+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
51759+{
51760+ DECLARE_WAITQUEUE(wait, current);
51761+ ssize_t retval = 0;
51762+
51763+ add_wait_queue(&learn_wait, &wait);
51764+ set_current_state(TASK_INTERRUPTIBLE);
51765+ do {
51766+ mutex_lock(&gr_learn_user_mutex);
51767+ spin_lock(&gr_learn_lock);
51768+ if (learn_buffer_len)
51769+ break;
51770+ spin_unlock(&gr_learn_lock);
51771+ mutex_unlock(&gr_learn_user_mutex);
51772+ if (file->f_flags & O_NONBLOCK) {
51773+ retval = -EAGAIN;
51774+ goto out;
51775+ }
51776+ if (signal_pending(current)) {
51777+ retval = -ERESTARTSYS;
51778+ goto out;
51779+ }
51780+
51781+ schedule();
51782+ } while (1);
51783+
51784+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
51785+ learn_buffer_user_len = learn_buffer_len;
51786+ retval = learn_buffer_len;
51787+ learn_buffer_len = 0;
51788+
51789+ spin_unlock(&gr_learn_lock);
51790+
51791+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
51792+ retval = -EFAULT;
51793+
51794+ mutex_unlock(&gr_learn_user_mutex);
51795+out:
51796+ set_current_state(TASK_RUNNING);
51797+ remove_wait_queue(&learn_wait, &wait);
51798+ return retval;
51799+}
51800+
51801+static unsigned int
51802+poll_learn(struct file * file, poll_table * wait)
51803+{
51804+ poll_wait(file, &learn_wait, wait);
51805+
51806+ if (learn_buffer_len)
51807+ return (POLLIN | POLLRDNORM);
51808+
51809+ return 0;
51810+}
51811+
51812+void
51813+gr_clear_learn_entries(void)
51814+{
51815+ char *tmp;
51816+
51817+ mutex_lock(&gr_learn_user_mutex);
51818+ spin_lock(&gr_learn_lock);
51819+ tmp = learn_buffer;
51820+ learn_buffer = NULL;
51821+ spin_unlock(&gr_learn_lock);
51822+ if (tmp)
51823+ vfree(tmp);
51824+ if (learn_buffer_user != NULL) {
51825+ vfree(learn_buffer_user);
51826+ learn_buffer_user = NULL;
51827+ }
51828+ learn_buffer_len = 0;
51829+ mutex_unlock(&gr_learn_user_mutex);
51830+
51831+ return;
51832+}
51833+
51834+void
51835+gr_add_learn_entry(const char *fmt, ...)
51836+{
51837+ va_list args;
51838+ unsigned int len;
51839+
51840+ if (!gr_learn_attached)
51841+ return;
51842+
51843+ spin_lock(&gr_learn_lock);
51844+
51845+ /* leave a gap at the end so we know when it's "full" but don't have to
51846+ compute the exact length of the string we're trying to append
51847+ */
51848+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
51849+ spin_unlock(&gr_learn_lock);
51850+ wake_up_interruptible(&learn_wait);
51851+ return;
51852+ }
51853+ if (learn_buffer == NULL) {
51854+ spin_unlock(&gr_learn_lock);
51855+ return;
51856+ }
51857+
51858+ va_start(args, fmt);
51859+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
51860+ va_end(args);
51861+
51862+ learn_buffer_len += len + 1;
51863+
51864+ spin_unlock(&gr_learn_lock);
51865+ wake_up_interruptible(&learn_wait);
51866+
51867+ return;
51868+}
51869+
51870+static int
51871+open_learn(struct inode *inode, struct file *file)
51872+{
51873+ if (file->f_mode & FMODE_READ && gr_learn_attached)
51874+ return -EBUSY;
51875+ if (file->f_mode & FMODE_READ) {
51876+ int retval = 0;
51877+ mutex_lock(&gr_learn_user_mutex);
51878+ if (learn_buffer == NULL)
51879+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
51880+ if (learn_buffer_user == NULL)
51881+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
51882+ if (learn_buffer == NULL) {
51883+ retval = -ENOMEM;
51884+ goto out_error;
51885+ }
51886+ if (learn_buffer_user == NULL) {
51887+ retval = -ENOMEM;
51888+ goto out_error;
51889+ }
51890+ learn_buffer_len = 0;
51891+ learn_buffer_user_len = 0;
51892+ gr_learn_attached = 1;
51893+out_error:
51894+ mutex_unlock(&gr_learn_user_mutex);
51895+ return retval;
51896+ }
51897+ return 0;
51898+}
51899+
51900+static int
51901+close_learn(struct inode *inode, struct file *file)
51902+{
51903+ if (file->f_mode & FMODE_READ) {
51904+ char *tmp = NULL;
51905+ mutex_lock(&gr_learn_user_mutex);
51906+ spin_lock(&gr_learn_lock);
51907+ tmp = learn_buffer;
51908+ learn_buffer = NULL;
51909+ spin_unlock(&gr_learn_lock);
51910+ if (tmp)
51911+ vfree(tmp);
51912+ if (learn_buffer_user != NULL) {
51913+ vfree(learn_buffer_user);
51914+ learn_buffer_user = NULL;
51915+ }
51916+ learn_buffer_len = 0;
51917+ learn_buffer_user_len = 0;
51918+ gr_learn_attached = 0;
51919+ mutex_unlock(&gr_learn_user_mutex);
51920+ }
51921+
51922+ return 0;
51923+}
51924+
51925+const struct file_operations grsec_fops = {
51926+ .read = read_learn,
51927+ .write = write_grsec_handler,
51928+ .open = open_learn,
51929+ .release = close_learn,
51930+ .poll = poll_learn,
51931+};
51932diff -urNp linux-2.6.32.46/grsecurity/gracl_res.c linux-2.6.32.46/grsecurity/gracl_res.c
51933--- linux-2.6.32.46/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
51934+++ linux-2.6.32.46/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
51935@@ -0,0 +1,67 @@
51936+#include <linux/kernel.h>
51937+#include <linux/sched.h>
51938+#include <linux/gracl.h>
51939+#include <linux/grinternal.h>
51940+
51941+static const char *restab_log[] = {
51942+ [RLIMIT_CPU] = "RLIMIT_CPU",
51943+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
51944+ [RLIMIT_DATA] = "RLIMIT_DATA",
51945+ [RLIMIT_STACK] = "RLIMIT_STACK",
51946+ [RLIMIT_CORE] = "RLIMIT_CORE",
51947+ [RLIMIT_RSS] = "RLIMIT_RSS",
51948+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
51949+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
51950+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
51951+ [RLIMIT_AS] = "RLIMIT_AS",
51952+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
51953+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
51954+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
51955+ [RLIMIT_NICE] = "RLIMIT_NICE",
51956+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
51957+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
51958+ [GR_CRASH_RES] = "RLIMIT_CRASH"
51959+};
51960+
51961+void
51962+gr_log_resource(const struct task_struct *task,
51963+ const int res, const unsigned long wanted, const int gt)
51964+{
51965+ const struct cred *cred;
51966+ unsigned long rlim;
51967+
51968+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
51969+ return;
51970+
51971+ // not yet supported resource
51972+ if (unlikely(!restab_log[res]))
51973+ return;
51974+
51975+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
51976+ rlim = task->signal->rlim[res].rlim_max;
51977+ else
51978+ rlim = task->signal->rlim[res].rlim_cur;
51979+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
51980+ return;
51981+
51982+ rcu_read_lock();
51983+ cred = __task_cred(task);
51984+
51985+ if (res == RLIMIT_NPROC &&
51986+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
51987+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
51988+ goto out_rcu_unlock;
51989+ else if (res == RLIMIT_MEMLOCK &&
51990+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
51991+ goto out_rcu_unlock;
51992+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
51993+ goto out_rcu_unlock;
51994+ rcu_read_unlock();
51995+
51996+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
51997+
51998+ return;
51999+out_rcu_unlock:
52000+ rcu_read_unlock();
52001+ return;
52002+}
52003diff -urNp linux-2.6.32.46/grsecurity/gracl_segv.c linux-2.6.32.46/grsecurity/gracl_segv.c
52004--- linux-2.6.32.46/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52005+++ linux-2.6.32.46/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52006@@ -0,0 +1,284 @@
52007+#include <linux/kernel.h>
52008+#include <linux/mm.h>
52009+#include <asm/uaccess.h>
52010+#include <asm/errno.h>
52011+#include <asm/mman.h>
52012+#include <net/sock.h>
52013+#include <linux/file.h>
52014+#include <linux/fs.h>
52015+#include <linux/net.h>
52016+#include <linux/in.h>
52017+#include <linux/smp_lock.h>
52018+#include <linux/slab.h>
52019+#include <linux/types.h>
52020+#include <linux/sched.h>
52021+#include <linux/timer.h>
52022+#include <linux/gracl.h>
52023+#include <linux/grsecurity.h>
52024+#include <linux/grinternal.h>
52025+
52026+static struct crash_uid *uid_set;
52027+static unsigned short uid_used;
52028+static DEFINE_SPINLOCK(gr_uid_lock);
52029+extern rwlock_t gr_inode_lock;
52030+extern struct acl_subject_label *
52031+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52032+ struct acl_role_label *role);
52033+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52034+
52035+int
52036+gr_init_uidset(void)
52037+{
52038+ uid_set =
52039+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52040+ uid_used = 0;
52041+
52042+ return uid_set ? 1 : 0;
52043+}
52044+
52045+void
52046+gr_free_uidset(void)
52047+{
52048+ if (uid_set)
52049+ kfree(uid_set);
52050+
52051+ return;
52052+}
52053+
52054+int
52055+gr_find_uid(const uid_t uid)
52056+{
52057+ struct crash_uid *tmp = uid_set;
52058+ uid_t buid;
52059+ int low = 0, high = uid_used - 1, mid;
52060+
52061+ while (high >= low) {
52062+ mid = (low + high) >> 1;
52063+ buid = tmp[mid].uid;
52064+ if (buid == uid)
52065+ return mid;
52066+ if (buid > uid)
52067+ high = mid - 1;
52068+ if (buid < uid)
52069+ low = mid + 1;
52070+ }
52071+
52072+ return -1;
52073+}
52074+
52075+static __inline__ void
52076+gr_insertsort(void)
52077+{
52078+ unsigned short i, j;
52079+ struct crash_uid index;
52080+
52081+ for (i = 1; i < uid_used; i++) {
52082+ index = uid_set[i];
52083+ j = i;
52084+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52085+ uid_set[j] = uid_set[j - 1];
52086+ j--;
52087+ }
52088+ uid_set[j] = index;
52089+ }
52090+
52091+ return;
52092+}
52093+
52094+static __inline__ void
52095+gr_insert_uid(const uid_t uid, const unsigned long expires)
52096+{
52097+ int loc;
52098+
52099+ if (uid_used == GR_UIDTABLE_MAX)
52100+ return;
52101+
52102+ loc = gr_find_uid(uid);
52103+
52104+ if (loc >= 0) {
52105+ uid_set[loc].expires = expires;
52106+ return;
52107+ }
52108+
52109+ uid_set[uid_used].uid = uid;
52110+ uid_set[uid_used].expires = expires;
52111+ uid_used++;
52112+
52113+ gr_insertsort();
52114+
52115+ return;
52116+}
52117+
52118+void
52119+gr_remove_uid(const unsigned short loc)
52120+{
52121+ unsigned short i;
52122+
52123+ for (i = loc + 1; i < uid_used; i++)
52124+ uid_set[i - 1] = uid_set[i];
52125+
52126+ uid_used--;
52127+
52128+ return;
52129+}
52130+
52131+int
52132+gr_check_crash_uid(const uid_t uid)
52133+{
52134+ int loc;
52135+ int ret = 0;
52136+
52137+ if (unlikely(!gr_acl_is_enabled()))
52138+ return 0;
52139+
52140+ spin_lock(&gr_uid_lock);
52141+ loc = gr_find_uid(uid);
52142+
52143+ if (loc < 0)
52144+ goto out_unlock;
52145+
52146+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52147+ gr_remove_uid(loc);
52148+ else
52149+ ret = 1;
52150+
52151+out_unlock:
52152+ spin_unlock(&gr_uid_lock);
52153+ return ret;
52154+}
52155+
52156+static __inline__ int
52157+proc_is_setxid(const struct cred *cred)
52158+{
52159+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52160+ cred->uid != cred->fsuid)
52161+ return 1;
52162+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52163+ cred->gid != cred->fsgid)
52164+ return 1;
52165+
52166+ return 0;
52167+}
52168+
52169+void
52170+gr_handle_crash(struct task_struct *task, const int sig)
52171+{
52172+ struct acl_subject_label *curr;
52173+ struct acl_subject_label *curr2;
52174+ struct task_struct *tsk, *tsk2;
52175+ const struct cred *cred;
52176+ const struct cred *cred2;
52177+
52178+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52179+ return;
52180+
52181+ if (unlikely(!gr_acl_is_enabled()))
52182+ return;
52183+
52184+ curr = task->acl;
52185+
52186+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52187+ return;
52188+
52189+ if (time_before_eq(curr->expires, get_seconds())) {
52190+ curr->expires = 0;
52191+ curr->crashes = 0;
52192+ }
52193+
52194+ curr->crashes++;
52195+
52196+ if (!curr->expires)
52197+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52198+
52199+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52200+ time_after(curr->expires, get_seconds())) {
52201+ rcu_read_lock();
52202+ cred = __task_cred(task);
52203+ if (cred->uid && proc_is_setxid(cred)) {
52204+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52205+ spin_lock(&gr_uid_lock);
52206+ gr_insert_uid(cred->uid, curr->expires);
52207+ spin_unlock(&gr_uid_lock);
52208+ curr->expires = 0;
52209+ curr->crashes = 0;
52210+ read_lock(&tasklist_lock);
52211+ do_each_thread(tsk2, tsk) {
52212+ cred2 = __task_cred(tsk);
52213+ if (tsk != task && cred2->uid == cred->uid)
52214+ gr_fake_force_sig(SIGKILL, tsk);
52215+ } while_each_thread(tsk2, tsk);
52216+ read_unlock(&tasklist_lock);
52217+ } else {
52218+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52219+ read_lock(&tasklist_lock);
52220+ do_each_thread(tsk2, tsk) {
52221+ if (likely(tsk != task)) {
52222+ curr2 = tsk->acl;
52223+
52224+ if (curr2->device == curr->device &&
52225+ curr2->inode == curr->inode)
52226+ gr_fake_force_sig(SIGKILL, tsk);
52227+ }
52228+ } while_each_thread(tsk2, tsk);
52229+ read_unlock(&tasklist_lock);
52230+ }
52231+ rcu_read_unlock();
52232+ }
52233+
52234+ return;
52235+}
52236+
52237+int
52238+gr_check_crash_exec(const struct file *filp)
52239+{
52240+ struct acl_subject_label *curr;
52241+
52242+ if (unlikely(!gr_acl_is_enabled()))
52243+ return 0;
52244+
52245+ read_lock(&gr_inode_lock);
52246+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52247+ filp->f_path.dentry->d_inode->i_sb->s_dev,
52248+ current->role);
52249+ read_unlock(&gr_inode_lock);
52250+
52251+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52252+ (!curr->crashes && !curr->expires))
52253+ return 0;
52254+
52255+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52256+ time_after(curr->expires, get_seconds()))
52257+ return 1;
52258+ else if (time_before_eq(curr->expires, get_seconds())) {
52259+ curr->crashes = 0;
52260+ curr->expires = 0;
52261+ }
52262+
52263+ return 0;
52264+}
52265+
52266+void
52267+gr_handle_alertkill(struct task_struct *task)
52268+{
52269+ struct acl_subject_label *curracl;
52270+ __u32 curr_ip;
52271+ struct task_struct *p, *p2;
52272+
52273+ if (unlikely(!gr_acl_is_enabled()))
52274+ return;
52275+
52276+ curracl = task->acl;
52277+ curr_ip = task->signal->curr_ip;
52278+
52279+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52280+ read_lock(&tasklist_lock);
52281+ do_each_thread(p2, p) {
52282+ if (p->signal->curr_ip == curr_ip)
52283+ gr_fake_force_sig(SIGKILL, p);
52284+ } while_each_thread(p2, p);
52285+ read_unlock(&tasklist_lock);
52286+ } else if (curracl->mode & GR_KILLPROC)
52287+ gr_fake_force_sig(SIGKILL, task);
52288+
52289+ return;
52290+}
52291diff -urNp linux-2.6.32.46/grsecurity/gracl_shm.c linux-2.6.32.46/grsecurity/gracl_shm.c
52292--- linux-2.6.32.46/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52293+++ linux-2.6.32.46/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52294@@ -0,0 +1,40 @@
52295+#include <linux/kernel.h>
52296+#include <linux/mm.h>
52297+#include <linux/sched.h>
52298+#include <linux/file.h>
52299+#include <linux/ipc.h>
52300+#include <linux/gracl.h>
52301+#include <linux/grsecurity.h>
52302+#include <linux/grinternal.h>
52303+
52304+int
52305+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52306+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52307+{
52308+ struct task_struct *task;
52309+
52310+ if (!gr_acl_is_enabled())
52311+ return 1;
52312+
52313+ rcu_read_lock();
52314+ read_lock(&tasklist_lock);
52315+
52316+ task = find_task_by_vpid(shm_cprid);
52317+
52318+ if (unlikely(!task))
52319+ task = find_task_by_vpid(shm_lapid);
52320+
52321+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52322+ (task->pid == shm_lapid)) &&
52323+ (task->acl->mode & GR_PROTSHM) &&
52324+ (task->acl != current->acl))) {
52325+ read_unlock(&tasklist_lock);
52326+ rcu_read_unlock();
52327+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52328+ return 0;
52329+ }
52330+ read_unlock(&tasklist_lock);
52331+ rcu_read_unlock();
52332+
52333+ return 1;
52334+}
52335diff -urNp linux-2.6.32.46/grsecurity/grsec_chdir.c linux-2.6.32.46/grsecurity/grsec_chdir.c
52336--- linux-2.6.32.46/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52337+++ linux-2.6.32.46/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52338@@ -0,0 +1,19 @@
52339+#include <linux/kernel.h>
52340+#include <linux/sched.h>
52341+#include <linux/fs.h>
52342+#include <linux/file.h>
52343+#include <linux/grsecurity.h>
52344+#include <linux/grinternal.h>
52345+
52346+void
52347+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52348+{
52349+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52350+ if ((grsec_enable_chdir && grsec_enable_group &&
52351+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52352+ !grsec_enable_group)) {
52353+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52354+ }
52355+#endif
52356+ return;
52357+}
52358diff -urNp linux-2.6.32.46/grsecurity/grsec_chroot.c linux-2.6.32.46/grsecurity/grsec_chroot.c
52359--- linux-2.6.32.46/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52360+++ linux-2.6.32.46/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52361@@ -0,0 +1,384 @@
52362+#include <linux/kernel.h>
52363+#include <linux/module.h>
52364+#include <linux/sched.h>
52365+#include <linux/file.h>
52366+#include <linux/fs.h>
52367+#include <linux/mount.h>
52368+#include <linux/types.h>
52369+#include <linux/pid_namespace.h>
52370+#include <linux/grsecurity.h>
52371+#include <linux/grinternal.h>
52372+
52373+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52374+{
52375+#ifdef CONFIG_GRKERNSEC
52376+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52377+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52378+ task->gr_is_chrooted = 1;
52379+ else
52380+ task->gr_is_chrooted = 0;
52381+
52382+ task->gr_chroot_dentry = path->dentry;
52383+#endif
52384+ return;
52385+}
52386+
52387+void gr_clear_chroot_entries(struct task_struct *task)
52388+{
52389+#ifdef CONFIG_GRKERNSEC
52390+ task->gr_is_chrooted = 0;
52391+ task->gr_chroot_dentry = NULL;
52392+#endif
52393+ return;
52394+}
52395+
52396+int
52397+gr_handle_chroot_unix(const pid_t pid)
52398+{
52399+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52400+ struct task_struct *p;
52401+
52402+ if (unlikely(!grsec_enable_chroot_unix))
52403+ return 1;
52404+
52405+ if (likely(!proc_is_chrooted(current)))
52406+ return 1;
52407+
52408+ rcu_read_lock();
52409+ read_lock(&tasklist_lock);
52410+
52411+ p = find_task_by_vpid_unrestricted(pid);
52412+ if (unlikely(p && !have_same_root(current, p))) {
52413+ read_unlock(&tasklist_lock);
52414+ rcu_read_unlock();
52415+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52416+ return 0;
52417+ }
52418+ read_unlock(&tasklist_lock);
52419+ rcu_read_unlock();
52420+#endif
52421+ return 1;
52422+}
52423+
52424+int
52425+gr_handle_chroot_nice(void)
52426+{
52427+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52428+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52429+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52430+ return -EPERM;
52431+ }
52432+#endif
52433+ return 0;
52434+}
52435+
52436+int
52437+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52438+{
52439+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52440+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52441+ && proc_is_chrooted(current)) {
52442+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52443+ return -EACCES;
52444+ }
52445+#endif
52446+ return 0;
52447+}
52448+
52449+int
52450+gr_handle_chroot_rawio(const struct inode *inode)
52451+{
52452+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52453+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52454+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52455+ return 1;
52456+#endif
52457+ return 0;
52458+}
52459+
52460+int
52461+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52462+{
52463+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52464+ struct task_struct *p;
52465+ int ret = 0;
52466+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52467+ return ret;
52468+
52469+ read_lock(&tasklist_lock);
52470+ do_each_pid_task(pid, type, p) {
52471+ if (!have_same_root(current, p)) {
52472+ ret = 1;
52473+ goto out;
52474+ }
52475+ } while_each_pid_task(pid, type, p);
52476+out:
52477+ read_unlock(&tasklist_lock);
52478+ return ret;
52479+#endif
52480+ return 0;
52481+}
52482+
52483+int
52484+gr_pid_is_chrooted(struct task_struct *p)
52485+{
52486+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52487+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52488+ return 0;
52489+
52490+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52491+ !have_same_root(current, p)) {
52492+ return 1;
52493+ }
52494+#endif
52495+ return 0;
52496+}
52497+
52498+EXPORT_SYMBOL(gr_pid_is_chrooted);
52499+
52500+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52501+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52502+{
52503+ struct dentry *dentry = (struct dentry *)u_dentry;
52504+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52505+ struct dentry *realroot;
52506+ struct vfsmount *realrootmnt;
52507+ struct dentry *currentroot;
52508+ struct vfsmount *currentmnt;
52509+ struct task_struct *reaper = &init_task;
52510+ int ret = 1;
52511+
52512+ read_lock(&reaper->fs->lock);
52513+ realrootmnt = mntget(reaper->fs->root.mnt);
52514+ realroot = dget(reaper->fs->root.dentry);
52515+ read_unlock(&reaper->fs->lock);
52516+
52517+ read_lock(&current->fs->lock);
52518+ currentmnt = mntget(current->fs->root.mnt);
52519+ currentroot = dget(current->fs->root.dentry);
52520+ read_unlock(&current->fs->lock);
52521+
52522+ spin_lock(&dcache_lock);
52523+ for (;;) {
52524+ if (unlikely((dentry == realroot && mnt == realrootmnt)
52525+ || (dentry == currentroot && mnt == currentmnt)))
52526+ break;
52527+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52528+ if (mnt->mnt_parent == mnt)
52529+ break;
52530+ dentry = mnt->mnt_mountpoint;
52531+ mnt = mnt->mnt_parent;
52532+ continue;
52533+ }
52534+ dentry = dentry->d_parent;
52535+ }
52536+ spin_unlock(&dcache_lock);
52537+
52538+ dput(currentroot);
52539+ mntput(currentmnt);
52540+
52541+ /* access is outside of chroot */
52542+ if (dentry == realroot && mnt == realrootmnt)
52543+ ret = 0;
52544+
52545+ dput(realroot);
52546+ mntput(realrootmnt);
52547+ return ret;
52548+}
52549+#endif
52550+
52551+int
52552+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52553+{
52554+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52555+ if (!grsec_enable_chroot_fchdir)
52556+ return 1;
52557+
52558+ if (!proc_is_chrooted(current))
52559+ return 1;
52560+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
52561+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
52562+ return 0;
52563+ }
52564+#endif
52565+ return 1;
52566+}
52567+
52568+int
52569+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52570+ const time_t shm_createtime)
52571+{
52572+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52573+ struct task_struct *p;
52574+ time_t starttime;
52575+
52576+ if (unlikely(!grsec_enable_chroot_shmat))
52577+ return 1;
52578+
52579+ if (likely(!proc_is_chrooted(current)))
52580+ return 1;
52581+
52582+ rcu_read_lock();
52583+ read_lock(&tasklist_lock);
52584+
52585+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
52586+ starttime = p->start_time.tv_sec;
52587+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
52588+ if (have_same_root(current, p)) {
52589+ goto allow;
52590+ } else {
52591+ read_unlock(&tasklist_lock);
52592+ rcu_read_unlock();
52593+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52594+ return 0;
52595+ }
52596+ }
52597+ /* creator exited, pid reuse, fall through to next check */
52598+ }
52599+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
52600+ if (unlikely(!have_same_root(current, p))) {
52601+ read_unlock(&tasklist_lock);
52602+ rcu_read_unlock();
52603+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
52604+ return 0;
52605+ }
52606+ }
52607+
52608+allow:
52609+ read_unlock(&tasklist_lock);
52610+ rcu_read_unlock();
52611+#endif
52612+ return 1;
52613+}
52614+
52615+void
52616+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
52617+{
52618+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52619+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
52620+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
52621+#endif
52622+ return;
52623+}
52624+
52625+int
52626+gr_handle_chroot_mknod(const struct dentry *dentry,
52627+ const struct vfsmount *mnt, const int mode)
52628+{
52629+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52630+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
52631+ proc_is_chrooted(current)) {
52632+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
52633+ return -EPERM;
52634+ }
52635+#endif
52636+ return 0;
52637+}
52638+
52639+int
52640+gr_handle_chroot_mount(const struct dentry *dentry,
52641+ const struct vfsmount *mnt, const char *dev_name)
52642+{
52643+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52644+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
52645+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
52646+ return -EPERM;
52647+ }
52648+#endif
52649+ return 0;
52650+}
52651+
52652+int
52653+gr_handle_chroot_pivot(void)
52654+{
52655+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52656+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
52657+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
52658+ return -EPERM;
52659+ }
52660+#endif
52661+ return 0;
52662+}
52663+
52664+int
52665+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
52666+{
52667+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52668+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
52669+ !gr_is_outside_chroot(dentry, mnt)) {
52670+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
52671+ return -EPERM;
52672+ }
52673+#endif
52674+ return 0;
52675+}
52676+
52677+int
52678+gr_handle_chroot_caps(struct path *path)
52679+{
52680+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52681+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
52682+ (init_task.fs->root.dentry != path->dentry) &&
52683+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
52684+
52685+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
52686+ const struct cred *old = current_cred();
52687+ struct cred *new = prepare_creds();
52688+ if (new == NULL)
52689+ return 1;
52690+
52691+ new->cap_permitted = cap_drop(old->cap_permitted,
52692+ chroot_caps);
52693+ new->cap_inheritable = cap_drop(old->cap_inheritable,
52694+ chroot_caps);
52695+ new->cap_effective = cap_drop(old->cap_effective,
52696+ chroot_caps);
52697+
52698+ commit_creds(new);
52699+
52700+ return 0;
52701+ }
52702+#endif
52703+ return 0;
52704+}
52705+
52706+int
52707+gr_handle_chroot_sysctl(const int op)
52708+{
52709+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52710+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
52711+ && (op & MAY_WRITE))
52712+ return -EACCES;
52713+#endif
52714+ return 0;
52715+}
52716+
52717+void
52718+gr_handle_chroot_chdir(struct path *path)
52719+{
52720+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52721+ if (grsec_enable_chroot_chdir)
52722+ set_fs_pwd(current->fs, path);
52723+#endif
52724+ return;
52725+}
52726+
52727+int
52728+gr_handle_chroot_chmod(const struct dentry *dentry,
52729+ const struct vfsmount *mnt, const int mode)
52730+{
52731+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52732+ /* allow chmod +s on directories, but not on files */
52733+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
52734+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
52735+ proc_is_chrooted(current)) {
52736+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
52737+ return -EPERM;
52738+ }
52739+#endif
52740+ return 0;
52741+}
52742+
52743+#ifdef CONFIG_SECURITY
52744+EXPORT_SYMBOL(gr_handle_chroot_caps);
52745+#endif
52746diff -urNp linux-2.6.32.46/grsecurity/grsec_disabled.c linux-2.6.32.46/grsecurity/grsec_disabled.c
52747--- linux-2.6.32.46/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
52748+++ linux-2.6.32.46/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
52749@@ -0,0 +1,447 @@
52750+#include <linux/kernel.h>
52751+#include <linux/module.h>
52752+#include <linux/sched.h>
52753+#include <linux/file.h>
52754+#include <linux/fs.h>
52755+#include <linux/kdev_t.h>
52756+#include <linux/net.h>
52757+#include <linux/in.h>
52758+#include <linux/ip.h>
52759+#include <linux/skbuff.h>
52760+#include <linux/sysctl.h>
52761+
52762+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
52763+void
52764+pax_set_initial_flags(struct linux_binprm *bprm)
52765+{
52766+ return;
52767+}
52768+#endif
52769+
52770+#ifdef CONFIG_SYSCTL
52771+__u32
52772+gr_handle_sysctl(const struct ctl_table * table, const int op)
52773+{
52774+ return 0;
52775+}
52776+#endif
52777+
52778+#ifdef CONFIG_TASKSTATS
52779+int gr_is_taskstats_denied(int pid)
52780+{
52781+ return 0;
52782+}
52783+#endif
52784+
52785+int
52786+gr_acl_is_enabled(void)
52787+{
52788+ return 0;
52789+}
52790+
52791+int
52792+gr_handle_rawio(const struct inode *inode)
52793+{
52794+ return 0;
52795+}
52796+
52797+void
52798+gr_acl_handle_psacct(struct task_struct *task, const long code)
52799+{
52800+ return;
52801+}
52802+
52803+int
52804+gr_handle_ptrace(struct task_struct *task, const long request)
52805+{
52806+ return 0;
52807+}
52808+
52809+int
52810+gr_handle_proc_ptrace(struct task_struct *task)
52811+{
52812+ return 0;
52813+}
52814+
52815+void
52816+gr_learn_resource(const struct task_struct *task,
52817+ const int res, const unsigned long wanted, const int gt)
52818+{
52819+ return;
52820+}
52821+
52822+int
52823+gr_set_acls(const int type)
52824+{
52825+ return 0;
52826+}
52827+
52828+int
52829+gr_check_hidden_task(const struct task_struct *tsk)
52830+{
52831+ return 0;
52832+}
52833+
52834+int
52835+gr_check_protected_task(const struct task_struct *task)
52836+{
52837+ return 0;
52838+}
52839+
52840+int
52841+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52842+{
52843+ return 0;
52844+}
52845+
52846+void
52847+gr_copy_label(struct task_struct *tsk)
52848+{
52849+ return;
52850+}
52851+
52852+void
52853+gr_set_pax_flags(struct task_struct *task)
52854+{
52855+ return;
52856+}
52857+
52858+int
52859+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52860+ const int unsafe_share)
52861+{
52862+ return 0;
52863+}
52864+
52865+void
52866+gr_handle_delete(const ino_t ino, const dev_t dev)
52867+{
52868+ return;
52869+}
52870+
52871+void
52872+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52873+{
52874+ return;
52875+}
52876+
52877+void
52878+gr_handle_crash(struct task_struct *task, const int sig)
52879+{
52880+ return;
52881+}
52882+
52883+int
52884+gr_check_crash_exec(const struct file *filp)
52885+{
52886+ return 0;
52887+}
52888+
52889+int
52890+gr_check_crash_uid(const uid_t uid)
52891+{
52892+ return 0;
52893+}
52894+
52895+void
52896+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52897+ struct dentry *old_dentry,
52898+ struct dentry *new_dentry,
52899+ struct vfsmount *mnt, const __u8 replace)
52900+{
52901+ return;
52902+}
52903+
52904+int
52905+gr_search_socket(const int family, const int type, const int protocol)
52906+{
52907+ return 1;
52908+}
52909+
52910+int
52911+gr_search_connectbind(const int mode, const struct socket *sock,
52912+ const struct sockaddr_in *addr)
52913+{
52914+ return 0;
52915+}
52916+
52917+int
52918+gr_is_capable(const int cap)
52919+{
52920+ return 1;
52921+}
52922+
52923+int
52924+gr_is_capable_nolog(const int cap)
52925+{
52926+ return 1;
52927+}
52928+
52929+void
52930+gr_handle_alertkill(struct task_struct *task)
52931+{
52932+ return;
52933+}
52934+
52935+__u32
52936+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
52937+{
52938+ return 1;
52939+}
52940+
52941+__u32
52942+gr_acl_handle_hidden_file(const struct dentry * dentry,
52943+ const struct vfsmount * mnt)
52944+{
52945+ return 1;
52946+}
52947+
52948+__u32
52949+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52950+ const int fmode)
52951+{
52952+ return 1;
52953+}
52954+
52955+__u32
52956+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52957+{
52958+ return 1;
52959+}
52960+
52961+__u32
52962+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
52963+{
52964+ return 1;
52965+}
52966+
52967+int
52968+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
52969+ unsigned int *vm_flags)
52970+{
52971+ return 1;
52972+}
52973+
52974+__u32
52975+gr_acl_handle_truncate(const struct dentry * dentry,
52976+ const struct vfsmount * mnt)
52977+{
52978+ return 1;
52979+}
52980+
52981+__u32
52982+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
52983+{
52984+ return 1;
52985+}
52986+
52987+__u32
52988+gr_acl_handle_access(const struct dentry * dentry,
52989+ const struct vfsmount * mnt, const int fmode)
52990+{
52991+ return 1;
52992+}
52993+
52994+__u32
52995+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
52996+ mode_t mode)
52997+{
52998+ return 1;
52999+}
53000+
53001+__u32
53002+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53003+ mode_t mode)
53004+{
53005+ return 1;
53006+}
53007+
53008+__u32
53009+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53010+{
53011+ return 1;
53012+}
53013+
53014+__u32
53015+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53016+{
53017+ return 1;
53018+}
53019+
53020+void
53021+grsecurity_init(void)
53022+{
53023+ return;
53024+}
53025+
53026+__u32
53027+gr_acl_handle_mknod(const struct dentry * new_dentry,
53028+ const struct dentry * parent_dentry,
53029+ const struct vfsmount * parent_mnt,
53030+ const int mode)
53031+{
53032+ return 1;
53033+}
53034+
53035+__u32
53036+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53037+ const struct dentry * parent_dentry,
53038+ const struct vfsmount * parent_mnt)
53039+{
53040+ return 1;
53041+}
53042+
53043+__u32
53044+gr_acl_handle_symlink(const struct dentry * new_dentry,
53045+ const struct dentry * parent_dentry,
53046+ const struct vfsmount * parent_mnt, const char *from)
53047+{
53048+ return 1;
53049+}
53050+
53051+__u32
53052+gr_acl_handle_link(const struct dentry * new_dentry,
53053+ const struct dentry * parent_dentry,
53054+ const struct vfsmount * parent_mnt,
53055+ const struct dentry * old_dentry,
53056+ const struct vfsmount * old_mnt, const char *to)
53057+{
53058+ return 1;
53059+}
53060+
53061+int
53062+gr_acl_handle_rename(const struct dentry *new_dentry,
53063+ const struct dentry *parent_dentry,
53064+ const struct vfsmount *parent_mnt,
53065+ const struct dentry *old_dentry,
53066+ const struct inode *old_parent_inode,
53067+ const struct vfsmount *old_mnt, const char *newname)
53068+{
53069+ return 0;
53070+}
53071+
53072+int
53073+gr_acl_handle_filldir(const struct file *file, const char *name,
53074+ const int namelen, const ino_t ino)
53075+{
53076+ return 1;
53077+}
53078+
53079+int
53080+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53081+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53082+{
53083+ return 1;
53084+}
53085+
53086+int
53087+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53088+{
53089+ return 0;
53090+}
53091+
53092+int
53093+gr_search_accept(const struct socket *sock)
53094+{
53095+ return 0;
53096+}
53097+
53098+int
53099+gr_search_listen(const struct socket *sock)
53100+{
53101+ return 0;
53102+}
53103+
53104+int
53105+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53106+{
53107+ return 0;
53108+}
53109+
53110+__u32
53111+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53112+{
53113+ return 1;
53114+}
53115+
53116+__u32
53117+gr_acl_handle_creat(const struct dentry * dentry,
53118+ const struct dentry * p_dentry,
53119+ const struct vfsmount * p_mnt, const int fmode,
53120+ const int imode)
53121+{
53122+ return 1;
53123+}
53124+
53125+void
53126+gr_acl_handle_exit(void)
53127+{
53128+ return;
53129+}
53130+
53131+int
53132+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53133+{
53134+ return 1;
53135+}
53136+
53137+void
53138+gr_set_role_label(const uid_t uid, const gid_t gid)
53139+{
53140+ return;
53141+}
53142+
53143+int
53144+gr_acl_handle_procpidmem(const struct task_struct *task)
53145+{
53146+ return 0;
53147+}
53148+
53149+int
53150+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53151+{
53152+ return 0;
53153+}
53154+
53155+int
53156+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53157+{
53158+ return 0;
53159+}
53160+
53161+void
53162+gr_set_kernel_label(struct task_struct *task)
53163+{
53164+ return;
53165+}
53166+
53167+int
53168+gr_check_user_change(int real, int effective, int fs)
53169+{
53170+ return 0;
53171+}
53172+
53173+int
53174+gr_check_group_change(int real, int effective, int fs)
53175+{
53176+ return 0;
53177+}
53178+
53179+int gr_acl_enable_at_secure(void)
53180+{
53181+ return 0;
53182+}
53183+
53184+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53185+{
53186+ return dentry->d_inode->i_sb->s_dev;
53187+}
53188+
53189+EXPORT_SYMBOL(gr_is_capable);
53190+EXPORT_SYMBOL(gr_is_capable_nolog);
53191+EXPORT_SYMBOL(gr_learn_resource);
53192+EXPORT_SYMBOL(gr_set_kernel_label);
53193+#ifdef CONFIG_SECURITY
53194+EXPORT_SYMBOL(gr_check_user_change);
53195+EXPORT_SYMBOL(gr_check_group_change);
53196+#endif
53197diff -urNp linux-2.6.32.46/grsecurity/grsec_exec.c linux-2.6.32.46/grsecurity/grsec_exec.c
53198--- linux-2.6.32.46/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53199+++ linux-2.6.32.46/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53200@@ -0,0 +1,132 @@
53201+#include <linux/kernel.h>
53202+#include <linux/sched.h>
53203+#include <linux/file.h>
53204+#include <linux/binfmts.h>
53205+#include <linux/smp_lock.h>
53206+#include <linux/fs.h>
53207+#include <linux/types.h>
53208+#include <linux/grdefs.h>
53209+#include <linux/grinternal.h>
53210+#include <linux/capability.h>
53211+#include <linux/compat.h>
53212+
53213+#include <asm/uaccess.h>
53214+
53215+#ifdef CONFIG_GRKERNSEC_EXECLOG
53216+static char gr_exec_arg_buf[132];
53217+static DEFINE_MUTEX(gr_exec_arg_mutex);
53218+#endif
53219+
53220+void
53221+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53222+{
53223+#ifdef CONFIG_GRKERNSEC_EXECLOG
53224+ char *grarg = gr_exec_arg_buf;
53225+ unsigned int i, x, execlen = 0;
53226+ char c;
53227+
53228+ if (!((grsec_enable_execlog && grsec_enable_group &&
53229+ in_group_p(grsec_audit_gid))
53230+ || (grsec_enable_execlog && !grsec_enable_group)))
53231+ return;
53232+
53233+ mutex_lock(&gr_exec_arg_mutex);
53234+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53235+
53236+ if (unlikely(argv == NULL))
53237+ goto log;
53238+
53239+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53240+ const char __user *p;
53241+ unsigned int len;
53242+
53243+ if (copy_from_user(&p, argv + i, sizeof(p)))
53244+ goto log;
53245+ if (!p)
53246+ goto log;
53247+ len = strnlen_user(p, 128 - execlen);
53248+ if (len > 128 - execlen)
53249+ len = 128 - execlen;
53250+ else if (len > 0)
53251+ len--;
53252+ if (copy_from_user(grarg + execlen, p, len))
53253+ goto log;
53254+
53255+ /* rewrite unprintable characters */
53256+ for (x = 0; x < len; x++) {
53257+ c = *(grarg + execlen + x);
53258+ if (c < 32 || c > 126)
53259+ *(grarg + execlen + x) = ' ';
53260+ }
53261+
53262+ execlen += len;
53263+ *(grarg + execlen) = ' ';
53264+ *(grarg + execlen + 1) = '\0';
53265+ execlen++;
53266+ }
53267+
53268+ log:
53269+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53270+ bprm->file->f_path.mnt, grarg);
53271+ mutex_unlock(&gr_exec_arg_mutex);
53272+#endif
53273+ return;
53274+}
53275+
53276+#ifdef CONFIG_COMPAT
53277+void
53278+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53279+{
53280+#ifdef CONFIG_GRKERNSEC_EXECLOG
53281+ char *grarg = gr_exec_arg_buf;
53282+ unsigned int i, x, execlen = 0;
53283+ char c;
53284+
53285+ if (!((grsec_enable_execlog && grsec_enable_group &&
53286+ in_group_p(grsec_audit_gid))
53287+ || (grsec_enable_execlog && !grsec_enable_group)))
53288+ return;
53289+
53290+ mutex_lock(&gr_exec_arg_mutex);
53291+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53292+
53293+ if (unlikely(argv == NULL))
53294+ goto log;
53295+
53296+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53297+ compat_uptr_t p;
53298+ unsigned int len;
53299+
53300+ if (get_user(p, argv + i))
53301+ goto log;
53302+ len = strnlen_user(compat_ptr(p), 128 - execlen);
53303+ if (len > 128 - execlen)
53304+ len = 128 - execlen;
53305+ else if (len > 0)
53306+ len--;
53307+ else
53308+ goto log;
53309+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53310+ goto log;
53311+
53312+ /* rewrite unprintable characters */
53313+ for (x = 0; x < len; x++) {
53314+ c = *(grarg + execlen + x);
53315+ if (c < 32 || c > 126)
53316+ *(grarg + execlen + x) = ' ';
53317+ }
53318+
53319+ execlen += len;
53320+ *(grarg + execlen) = ' ';
53321+ *(grarg + execlen + 1) = '\0';
53322+ execlen++;
53323+ }
53324+
53325+ log:
53326+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53327+ bprm->file->f_path.mnt, grarg);
53328+ mutex_unlock(&gr_exec_arg_mutex);
53329+#endif
53330+ return;
53331+}
53332+#endif
53333diff -urNp linux-2.6.32.46/grsecurity/grsec_fifo.c linux-2.6.32.46/grsecurity/grsec_fifo.c
53334--- linux-2.6.32.46/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53335+++ linux-2.6.32.46/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53336@@ -0,0 +1,24 @@
53337+#include <linux/kernel.h>
53338+#include <linux/sched.h>
53339+#include <linux/fs.h>
53340+#include <linux/file.h>
53341+#include <linux/grinternal.h>
53342+
53343+int
53344+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53345+ const struct dentry *dir, const int flag, const int acc_mode)
53346+{
53347+#ifdef CONFIG_GRKERNSEC_FIFO
53348+ const struct cred *cred = current_cred();
53349+
53350+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53351+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53352+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53353+ (cred->fsuid != dentry->d_inode->i_uid)) {
53354+ if (!inode_permission(dentry->d_inode, acc_mode))
53355+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53356+ return -EACCES;
53357+ }
53358+#endif
53359+ return 0;
53360+}
53361diff -urNp linux-2.6.32.46/grsecurity/grsec_fork.c linux-2.6.32.46/grsecurity/grsec_fork.c
53362--- linux-2.6.32.46/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53363+++ linux-2.6.32.46/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53364@@ -0,0 +1,23 @@
53365+#include <linux/kernel.h>
53366+#include <linux/sched.h>
53367+#include <linux/grsecurity.h>
53368+#include <linux/grinternal.h>
53369+#include <linux/errno.h>
53370+
53371+void
53372+gr_log_forkfail(const int retval)
53373+{
53374+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53375+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53376+ switch (retval) {
53377+ case -EAGAIN:
53378+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53379+ break;
53380+ case -ENOMEM:
53381+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53382+ break;
53383+ }
53384+ }
53385+#endif
53386+ return;
53387+}
53388diff -urNp linux-2.6.32.46/grsecurity/grsec_init.c linux-2.6.32.46/grsecurity/grsec_init.c
53389--- linux-2.6.32.46/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53390+++ linux-2.6.32.46/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53391@@ -0,0 +1,270 @@
53392+#include <linux/kernel.h>
53393+#include <linux/sched.h>
53394+#include <linux/mm.h>
53395+#include <linux/smp_lock.h>
53396+#include <linux/gracl.h>
53397+#include <linux/slab.h>
53398+#include <linux/vmalloc.h>
53399+#include <linux/percpu.h>
53400+#include <linux/module.h>
53401+
53402+int grsec_enable_brute;
53403+int grsec_enable_link;
53404+int grsec_enable_dmesg;
53405+int grsec_enable_harden_ptrace;
53406+int grsec_enable_fifo;
53407+int grsec_enable_execlog;
53408+int grsec_enable_signal;
53409+int grsec_enable_forkfail;
53410+int grsec_enable_audit_ptrace;
53411+int grsec_enable_time;
53412+int grsec_enable_audit_textrel;
53413+int grsec_enable_group;
53414+int grsec_audit_gid;
53415+int grsec_enable_chdir;
53416+int grsec_enable_mount;
53417+int grsec_enable_rofs;
53418+int grsec_enable_chroot_findtask;
53419+int grsec_enable_chroot_mount;
53420+int grsec_enable_chroot_shmat;
53421+int grsec_enable_chroot_fchdir;
53422+int grsec_enable_chroot_double;
53423+int grsec_enable_chroot_pivot;
53424+int grsec_enable_chroot_chdir;
53425+int grsec_enable_chroot_chmod;
53426+int grsec_enable_chroot_mknod;
53427+int grsec_enable_chroot_nice;
53428+int grsec_enable_chroot_execlog;
53429+int grsec_enable_chroot_caps;
53430+int grsec_enable_chroot_sysctl;
53431+int grsec_enable_chroot_unix;
53432+int grsec_enable_tpe;
53433+int grsec_tpe_gid;
53434+int grsec_enable_blackhole;
53435+#ifdef CONFIG_IPV6_MODULE
53436+EXPORT_SYMBOL(grsec_enable_blackhole);
53437+#endif
53438+int grsec_lastack_retries;
53439+int grsec_enable_tpe_all;
53440+int grsec_enable_tpe_invert;
53441+int grsec_enable_socket_all;
53442+int grsec_socket_all_gid;
53443+int grsec_enable_socket_client;
53444+int grsec_socket_client_gid;
53445+int grsec_enable_socket_server;
53446+int grsec_socket_server_gid;
53447+int grsec_resource_logging;
53448+int grsec_disable_privio;
53449+int grsec_enable_log_rwxmaps;
53450+int grsec_lock;
53451+
53452+DEFINE_SPINLOCK(grsec_alert_lock);
53453+unsigned long grsec_alert_wtime = 0;
53454+unsigned long grsec_alert_fyet = 0;
53455+
53456+DEFINE_SPINLOCK(grsec_audit_lock);
53457+
53458+DEFINE_RWLOCK(grsec_exec_file_lock);
53459+
53460+char *gr_shared_page[4];
53461+
53462+char *gr_alert_log_fmt;
53463+char *gr_audit_log_fmt;
53464+char *gr_alert_log_buf;
53465+char *gr_audit_log_buf;
53466+
53467+extern struct gr_arg *gr_usermode;
53468+extern unsigned char *gr_system_salt;
53469+extern unsigned char *gr_system_sum;
53470+
53471+void __init
53472+grsecurity_init(void)
53473+{
53474+ int j;
53475+ /* create the per-cpu shared pages */
53476+
53477+#ifdef CONFIG_X86
53478+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53479+#endif
53480+
53481+ for (j = 0; j < 4; j++) {
53482+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53483+ if (gr_shared_page[j] == NULL) {
53484+ panic("Unable to allocate grsecurity shared page");
53485+ return;
53486+ }
53487+ }
53488+
53489+ /* allocate log buffers */
53490+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53491+ if (!gr_alert_log_fmt) {
53492+ panic("Unable to allocate grsecurity alert log format buffer");
53493+ return;
53494+ }
53495+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53496+ if (!gr_audit_log_fmt) {
53497+ panic("Unable to allocate grsecurity audit log format buffer");
53498+ return;
53499+ }
53500+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53501+ if (!gr_alert_log_buf) {
53502+ panic("Unable to allocate grsecurity alert log buffer");
53503+ return;
53504+ }
53505+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53506+ if (!gr_audit_log_buf) {
53507+ panic("Unable to allocate grsecurity audit log buffer");
53508+ return;
53509+ }
53510+
53511+ /* allocate memory for authentication structure */
53512+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53513+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53514+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53515+
53516+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53517+ panic("Unable to allocate grsecurity authentication structure");
53518+ return;
53519+ }
53520+
53521+
53522+#ifdef CONFIG_GRKERNSEC_IO
53523+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53524+ grsec_disable_privio = 1;
53525+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53526+ grsec_disable_privio = 1;
53527+#else
53528+ grsec_disable_privio = 0;
53529+#endif
53530+#endif
53531+
53532+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53533+ /* for backward compatibility, tpe_invert always defaults to on if
53534+ enabled in the kernel
53535+ */
53536+ grsec_enable_tpe_invert = 1;
53537+#endif
53538+
53539+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53540+#ifndef CONFIG_GRKERNSEC_SYSCTL
53541+ grsec_lock = 1;
53542+#endif
53543+
53544+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53545+ grsec_enable_audit_textrel = 1;
53546+#endif
53547+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53548+ grsec_enable_log_rwxmaps = 1;
53549+#endif
53550+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53551+ grsec_enable_group = 1;
53552+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53553+#endif
53554+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53555+ grsec_enable_chdir = 1;
53556+#endif
53557+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53558+ grsec_enable_harden_ptrace = 1;
53559+#endif
53560+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
53561+ grsec_enable_mount = 1;
53562+#endif
53563+#ifdef CONFIG_GRKERNSEC_LINK
53564+ grsec_enable_link = 1;
53565+#endif
53566+#ifdef CONFIG_GRKERNSEC_BRUTE
53567+ grsec_enable_brute = 1;
53568+#endif
53569+#ifdef CONFIG_GRKERNSEC_DMESG
53570+ grsec_enable_dmesg = 1;
53571+#endif
53572+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
53573+ grsec_enable_blackhole = 1;
53574+ grsec_lastack_retries = 4;
53575+#endif
53576+#ifdef CONFIG_GRKERNSEC_FIFO
53577+ grsec_enable_fifo = 1;
53578+#endif
53579+#ifdef CONFIG_GRKERNSEC_EXECLOG
53580+ grsec_enable_execlog = 1;
53581+#endif
53582+#ifdef CONFIG_GRKERNSEC_SIGNAL
53583+ grsec_enable_signal = 1;
53584+#endif
53585+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53586+ grsec_enable_forkfail = 1;
53587+#endif
53588+#ifdef CONFIG_GRKERNSEC_TIME
53589+ grsec_enable_time = 1;
53590+#endif
53591+#ifdef CONFIG_GRKERNSEC_RESLOG
53592+ grsec_resource_logging = 1;
53593+#endif
53594+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53595+ grsec_enable_chroot_findtask = 1;
53596+#endif
53597+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53598+ grsec_enable_chroot_unix = 1;
53599+#endif
53600+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53601+ grsec_enable_chroot_mount = 1;
53602+#endif
53603+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53604+ grsec_enable_chroot_fchdir = 1;
53605+#endif
53606+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53607+ grsec_enable_chroot_shmat = 1;
53608+#endif
53609+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53610+ grsec_enable_audit_ptrace = 1;
53611+#endif
53612+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53613+ grsec_enable_chroot_double = 1;
53614+#endif
53615+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53616+ grsec_enable_chroot_pivot = 1;
53617+#endif
53618+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53619+ grsec_enable_chroot_chdir = 1;
53620+#endif
53621+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53622+ grsec_enable_chroot_chmod = 1;
53623+#endif
53624+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53625+ grsec_enable_chroot_mknod = 1;
53626+#endif
53627+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53628+ grsec_enable_chroot_nice = 1;
53629+#endif
53630+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53631+ grsec_enable_chroot_execlog = 1;
53632+#endif
53633+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53634+ grsec_enable_chroot_caps = 1;
53635+#endif
53636+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53637+ grsec_enable_chroot_sysctl = 1;
53638+#endif
53639+#ifdef CONFIG_GRKERNSEC_TPE
53640+ grsec_enable_tpe = 1;
53641+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
53642+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53643+ grsec_enable_tpe_all = 1;
53644+#endif
53645+#endif
53646+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
53647+ grsec_enable_socket_all = 1;
53648+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
53649+#endif
53650+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
53651+ grsec_enable_socket_client = 1;
53652+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
53653+#endif
53654+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
53655+ grsec_enable_socket_server = 1;
53656+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
53657+#endif
53658+#endif
53659+
53660+ return;
53661+}
53662diff -urNp linux-2.6.32.46/grsecurity/grsec_link.c linux-2.6.32.46/grsecurity/grsec_link.c
53663--- linux-2.6.32.46/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
53664+++ linux-2.6.32.46/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
53665@@ -0,0 +1,43 @@
53666+#include <linux/kernel.h>
53667+#include <linux/sched.h>
53668+#include <linux/fs.h>
53669+#include <linux/file.h>
53670+#include <linux/grinternal.h>
53671+
53672+int
53673+gr_handle_follow_link(const struct inode *parent,
53674+ const struct inode *inode,
53675+ const struct dentry *dentry, const struct vfsmount *mnt)
53676+{
53677+#ifdef CONFIG_GRKERNSEC_LINK
53678+ const struct cred *cred = current_cred();
53679+
53680+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
53681+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
53682+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
53683+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
53684+ return -EACCES;
53685+ }
53686+#endif
53687+ return 0;
53688+}
53689+
53690+int
53691+gr_handle_hardlink(const struct dentry *dentry,
53692+ const struct vfsmount *mnt,
53693+ struct inode *inode, const int mode, const char *to)
53694+{
53695+#ifdef CONFIG_GRKERNSEC_LINK
53696+ const struct cred *cred = current_cred();
53697+
53698+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
53699+ (!S_ISREG(mode) || (mode & S_ISUID) ||
53700+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
53701+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
53702+ !capable(CAP_FOWNER) && cred->uid) {
53703+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
53704+ return -EPERM;
53705+ }
53706+#endif
53707+ return 0;
53708+}
53709diff -urNp linux-2.6.32.46/grsecurity/grsec_log.c linux-2.6.32.46/grsecurity/grsec_log.c
53710--- linux-2.6.32.46/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
53711+++ linux-2.6.32.46/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
53712@@ -0,0 +1,310 @@
53713+#include <linux/kernel.h>
53714+#include <linux/sched.h>
53715+#include <linux/file.h>
53716+#include <linux/tty.h>
53717+#include <linux/fs.h>
53718+#include <linux/grinternal.h>
53719+
53720+#ifdef CONFIG_TREE_PREEMPT_RCU
53721+#define DISABLE_PREEMPT() preempt_disable()
53722+#define ENABLE_PREEMPT() preempt_enable()
53723+#else
53724+#define DISABLE_PREEMPT()
53725+#define ENABLE_PREEMPT()
53726+#endif
53727+
53728+#define BEGIN_LOCKS(x) \
53729+ DISABLE_PREEMPT(); \
53730+ rcu_read_lock(); \
53731+ read_lock(&tasklist_lock); \
53732+ read_lock(&grsec_exec_file_lock); \
53733+ if (x != GR_DO_AUDIT) \
53734+ spin_lock(&grsec_alert_lock); \
53735+ else \
53736+ spin_lock(&grsec_audit_lock)
53737+
53738+#define END_LOCKS(x) \
53739+ if (x != GR_DO_AUDIT) \
53740+ spin_unlock(&grsec_alert_lock); \
53741+ else \
53742+ spin_unlock(&grsec_audit_lock); \
53743+ read_unlock(&grsec_exec_file_lock); \
53744+ read_unlock(&tasklist_lock); \
53745+ rcu_read_unlock(); \
53746+ ENABLE_PREEMPT(); \
53747+ if (x == GR_DONT_AUDIT) \
53748+ gr_handle_alertkill(current)
53749+
53750+enum {
53751+ FLOODING,
53752+ NO_FLOODING
53753+};
53754+
53755+extern char *gr_alert_log_fmt;
53756+extern char *gr_audit_log_fmt;
53757+extern char *gr_alert_log_buf;
53758+extern char *gr_audit_log_buf;
53759+
53760+static int gr_log_start(int audit)
53761+{
53762+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
53763+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
53764+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53765+
53766+ if (audit == GR_DO_AUDIT)
53767+ goto set_fmt;
53768+
53769+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
53770+ grsec_alert_wtime = jiffies;
53771+ grsec_alert_fyet = 0;
53772+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
53773+ grsec_alert_fyet++;
53774+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
53775+ grsec_alert_wtime = jiffies;
53776+ grsec_alert_fyet++;
53777+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
53778+ return FLOODING;
53779+ } else return FLOODING;
53780+
53781+set_fmt:
53782+ memset(buf, 0, PAGE_SIZE);
53783+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
53784+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
53785+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53786+ } else if (current->signal->curr_ip) {
53787+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
53788+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
53789+ } else if (gr_acl_is_enabled()) {
53790+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
53791+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
53792+ } else {
53793+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
53794+ strcpy(buf, fmt);
53795+ }
53796+
53797+ return NO_FLOODING;
53798+}
53799+
53800+static void gr_log_middle(int audit, const char *msg, va_list ap)
53801+ __attribute__ ((format (printf, 2, 0)));
53802+
53803+static void gr_log_middle(int audit, const char *msg, va_list ap)
53804+{
53805+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53806+ unsigned int len = strlen(buf);
53807+
53808+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53809+
53810+ return;
53811+}
53812+
53813+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53814+ __attribute__ ((format (printf, 2, 3)));
53815+
53816+static void gr_log_middle_varargs(int audit, const char *msg, ...)
53817+{
53818+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53819+ unsigned int len = strlen(buf);
53820+ va_list ap;
53821+
53822+ va_start(ap, msg);
53823+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
53824+ va_end(ap);
53825+
53826+ return;
53827+}
53828+
53829+static void gr_log_end(int audit)
53830+{
53831+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
53832+ unsigned int len = strlen(buf);
53833+
53834+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
53835+ printk("%s\n", buf);
53836+
53837+ return;
53838+}
53839+
53840+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
53841+{
53842+ int logtype;
53843+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
53844+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
53845+ void *voidptr = NULL;
53846+ int num1 = 0, num2 = 0;
53847+ unsigned long ulong1 = 0, ulong2 = 0;
53848+ struct dentry *dentry = NULL;
53849+ struct vfsmount *mnt = NULL;
53850+ struct file *file = NULL;
53851+ struct task_struct *task = NULL;
53852+ const struct cred *cred, *pcred;
53853+ va_list ap;
53854+
53855+ BEGIN_LOCKS(audit);
53856+ logtype = gr_log_start(audit);
53857+ if (logtype == FLOODING) {
53858+ END_LOCKS(audit);
53859+ return;
53860+ }
53861+ va_start(ap, argtypes);
53862+ switch (argtypes) {
53863+ case GR_TTYSNIFF:
53864+ task = va_arg(ap, struct task_struct *);
53865+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
53866+ break;
53867+ case GR_SYSCTL_HIDDEN:
53868+ str1 = va_arg(ap, char *);
53869+ gr_log_middle_varargs(audit, msg, result, str1);
53870+ break;
53871+ case GR_RBAC:
53872+ dentry = va_arg(ap, struct dentry *);
53873+ mnt = va_arg(ap, struct vfsmount *);
53874+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
53875+ break;
53876+ case GR_RBAC_STR:
53877+ dentry = va_arg(ap, struct dentry *);
53878+ mnt = va_arg(ap, struct vfsmount *);
53879+ str1 = va_arg(ap, char *);
53880+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
53881+ break;
53882+ case GR_STR_RBAC:
53883+ str1 = va_arg(ap, char *);
53884+ dentry = va_arg(ap, struct dentry *);
53885+ mnt = va_arg(ap, struct vfsmount *);
53886+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
53887+ break;
53888+ case GR_RBAC_MODE2:
53889+ dentry = va_arg(ap, struct dentry *);
53890+ mnt = va_arg(ap, struct vfsmount *);
53891+ str1 = va_arg(ap, char *);
53892+ str2 = va_arg(ap, char *);
53893+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
53894+ break;
53895+ case GR_RBAC_MODE3:
53896+ dentry = va_arg(ap, struct dentry *);
53897+ mnt = va_arg(ap, struct vfsmount *);
53898+ str1 = va_arg(ap, char *);
53899+ str2 = va_arg(ap, char *);
53900+ str3 = va_arg(ap, char *);
53901+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
53902+ break;
53903+ case GR_FILENAME:
53904+ dentry = va_arg(ap, struct dentry *);
53905+ mnt = va_arg(ap, struct vfsmount *);
53906+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
53907+ break;
53908+ case GR_STR_FILENAME:
53909+ str1 = va_arg(ap, char *);
53910+ dentry = va_arg(ap, struct dentry *);
53911+ mnt = va_arg(ap, struct vfsmount *);
53912+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
53913+ break;
53914+ case GR_FILENAME_STR:
53915+ dentry = va_arg(ap, struct dentry *);
53916+ mnt = va_arg(ap, struct vfsmount *);
53917+ str1 = va_arg(ap, char *);
53918+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
53919+ break;
53920+ case GR_FILENAME_TWO_INT:
53921+ dentry = va_arg(ap, struct dentry *);
53922+ mnt = va_arg(ap, struct vfsmount *);
53923+ num1 = va_arg(ap, int);
53924+ num2 = va_arg(ap, int);
53925+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
53926+ break;
53927+ case GR_FILENAME_TWO_INT_STR:
53928+ dentry = va_arg(ap, struct dentry *);
53929+ mnt = va_arg(ap, struct vfsmount *);
53930+ num1 = va_arg(ap, int);
53931+ num2 = va_arg(ap, int);
53932+ str1 = va_arg(ap, char *);
53933+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
53934+ break;
53935+ case GR_TEXTREL:
53936+ file = va_arg(ap, struct file *);
53937+ ulong1 = va_arg(ap, unsigned long);
53938+ ulong2 = va_arg(ap, unsigned long);
53939+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
53940+ break;
53941+ case GR_PTRACE:
53942+ task = va_arg(ap, struct task_struct *);
53943+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
53944+ break;
53945+ case GR_RESOURCE:
53946+ task = va_arg(ap, struct task_struct *);
53947+ cred = __task_cred(task);
53948+ pcred = __task_cred(task->real_parent);
53949+ ulong1 = va_arg(ap, unsigned long);
53950+ str1 = va_arg(ap, char *);
53951+ ulong2 = va_arg(ap, unsigned long);
53952+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53953+ break;
53954+ case GR_CAP:
53955+ task = va_arg(ap, struct task_struct *);
53956+ cred = __task_cred(task);
53957+ pcred = __task_cred(task->real_parent);
53958+ str1 = va_arg(ap, char *);
53959+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53960+ break;
53961+ case GR_SIG:
53962+ str1 = va_arg(ap, char *);
53963+ voidptr = va_arg(ap, void *);
53964+ gr_log_middle_varargs(audit, msg, str1, voidptr);
53965+ break;
53966+ case GR_SIG2:
53967+ task = va_arg(ap, struct task_struct *);
53968+ cred = __task_cred(task);
53969+ pcred = __task_cred(task->real_parent);
53970+ num1 = va_arg(ap, int);
53971+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
53972+ break;
53973+ case GR_CRASH1:
53974+ task = va_arg(ap, struct task_struct *);
53975+ cred = __task_cred(task);
53976+ pcred = __task_cred(task->real_parent);
53977+ ulong1 = va_arg(ap, unsigned long);
53978+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
53979+ break;
53980+ case GR_CRASH2:
53981+ task = va_arg(ap, struct task_struct *);
53982+ cred = __task_cred(task);
53983+ pcred = __task_cred(task->real_parent);
53984+ ulong1 = va_arg(ap, unsigned long);
53985+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
53986+ break;
53987+ case GR_RWXMAP:
53988+ file = va_arg(ap, struct file *);
53989+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
53990+ break;
53991+ case GR_PSACCT:
53992+ {
53993+ unsigned int wday, cday;
53994+ __u8 whr, chr;
53995+ __u8 wmin, cmin;
53996+ __u8 wsec, csec;
53997+ char cur_tty[64] = { 0 };
53998+ char parent_tty[64] = { 0 };
53999+
54000+ task = va_arg(ap, struct task_struct *);
54001+ wday = va_arg(ap, unsigned int);
54002+ cday = va_arg(ap, unsigned int);
54003+ whr = va_arg(ap, int);
54004+ chr = va_arg(ap, int);
54005+ wmin = va_arg(ap, int);
54006+ cmin = va_arg(ap, int);
54007+ wsec = va_arg(ap, int);
54008+ csec = va_arg(ap, int);
54009+ ulong1 = va_arg(ap, unsigned long);
54010+ cred = __task_cred(task);
54011+ pcred = __task_cred(task->real_parent);
54012+
54013+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54014+ }
54015+ break;
54016+ default:
54017+ gr_log_middle(audit, msg, ap);
54018+ }
54019+ va_end(ap);
54020+ gr_log_end(audit);
54021+ END_LOCKS(audit);
54022+}
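
gr_log_start above is a burst limiter: alerts are counted inside a CONFIG_GRKERNSEC_FLOODTIME window, and once CONFIG_GRKERNSEC_FLOODBURST is exceeded the window restarts and further alerts are dropped until it expires. The sketch below keeps only the shape of that algorithm in userspace terms, using seconds instead of jiffies; the names flood_ok, FLOODTIME and FLOODBURST are placeholders, and the patch's exact counter handling differs slightly (it admits one extra alert when a fresh window opens).

#include <stdbool.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window (stand-in for CONFIG_GRKERNSEC_FLOODTIME)  */
#define FLOODBURST 4    /* alerts allowed per window (stand-in for CONFIG_GRKERNSEC_FLOODBURST) */

static time_t window_start;      /* analogous to grsec_alert_wtime */
static unsigned int burst_count; /* analogous to grsec_alert_fyet  */

/* Returns true if an alert may be emitted now, false if it is flooded away. */
static bool flood_ok(void)
{
    time_t now = time(NULL);

    if (window_start == 0 || now - window_start > FLOODTIME) {
        window_start = now;      /* new window: reset the burst counter */
        burst_count = 0;
        return true;
    }
    if (burst_count < FLOODBURST) {
        burst_count++;           /* still within the allowed burst */
        return true;
    }
    return false;                /* over the burst: drop until the window expires */
}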
54023diff -urNp linux-2.6.32.46/grsecurity/grsec_mem.c linux-2.6.32.46/grsecurity/grsec_mem.c
54024--- linux-2.6.32.46/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54025+++ linux-2.6.32.46/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54026@@ -0,0 +1,33 @@
54027+#include <linux/kernel.h>
54028+#include <linux/sched.h>
54029+#include <linux/mm.h>
54030+#include <linux/mman.h>
54031+#include <linux/grinternal.h>
54032+
54033+void
54034+gr_handle_ioperm(void)
54035+{
54036+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54037+ return;
54038+}
54039+
54040+void
54041+gr_handle_iopl(void)
54042+{
54043+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54044+ return;
54045+}
54046+
54047+void
54048+gr_handle_mem_readwrite(u64 from, u64 to)
54049+{
54050+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54051+ return;
54052+}
54053+
54054+void
54055+gr_handle_vm86(void)
54056+{
54057+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54058+ return;
54059+}
54060diff -urNp linux-2.6.32.46/grsecurity/grsec_mount.c linux-2.6.32.46/grsecurity/grsec_mount.c
54061--- linux-2.6.32.46/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54062+++ linux-2.6.32.46/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54063@@ -0,0 +1,62 @@
54064+#include <linux/kernel.h>
54065+#include <linux/sched.h>
54066+#include <linux/mount.h>
54067+#include <linux/grsecurity.h>
54068+#include <linux/grinternal.h>
54069+
54070+void
54071+gr_log_remount(const char *devname, const int retval)
54072+{
54073+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54074+ if (grsec_enable_mount && (retval >= 0))
54075+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54076+#endif
54077+ return;
54078+}
54079+
54080+void
54081+gr_log_unmount(const char *devname, const int retval)
54082+{
54083+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54084+ if (grsec_enable_mount && (retval >= 0))
54085+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54086+#endif
54087+ return;
54088+}
54089+
54090+void
54091+gr_log_mount(const char *from, const char *to, const int retval)
54092+{
54093+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54094+ if (grsec_enable_mount && (retval >= 0))
54095+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54096+#endif
54097+ return;
54098+}
54099+
54100+int
54101+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54102+{
54103+#ifdef CONFIG_GRKERNSEC_ROFS
54104+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54105+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54106+ return -EPERM;
54107+ } else
54108+ return 0;
54109+#endif
54110+ return 0;
54111+}
54112+
54113+int
54114+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54115+{
54116+#ifdef CONFIG_GRKERNSEC_ROFS
54117+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54118+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54119+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54120+ return -EPERM;
54121+ } else
54122+ return 0;
54123+#endif
54124+ return 0;
54125+}
54126diff -urNp linux-2.6.32.46/grsecurity/grsec_pax.c linux-2.6.32.46/grsecurity/grsec_pax.c
54127--- linux-2.6.32.46/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54128+++ linux-2.6.32.46/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54129@@ -0,0 +1,36 @@
54130+#include <linux/kernel.h>
54131+#include <linux/sched.h>
54132+#include <linux/mm.h>
54133+#include <linux/file.h>
54134+#include <linux/grinternal.h>
54135+#include <linux/grsecurity.h>
54136+
54137+void
54138+gr_log_textrel(struct vm_area_struct * vma)
54139+{
54140+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54141+ if (grsec_enable_audit_textrel)
54142+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54143+#endif
54144+ return;
54145+}
54146+
54147+void
54148+gr_log_rwxmmap(struct file *file)
54149+{
54150+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54151+ if (grsec_enable_log_rwxmaps)
54152+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54153+#endif
54154+ return;
54155+}
54156+
54157+void
54158+gr_log_rwxmprotect(struct file *file)
54159+{
54160+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54161+ if (grsec_enable_log_rwxmaps)
54162+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54163+#endif
54164+ return;
54165+}
54166diff -urNp linux-2.6.32.46/grsecurity/grsec_ptrace.c linux-2.6.32.46/grsecurity/grsec_ptrace.c
54167--- linux-2.6.32.46/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54168+++ linux-2.6.32.46/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54169@@ -0,0 +1,14 @@
54170+#include <linux/kernel.h>
54171+#include <linux/sched.h>
54172+#include <linux/grinternal.h>
54173+#include <linux/grsecurity.h>
54174+
54175+void
54176+gr_audit_ptrace(struct task_struct *task)
54177+{
54178+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54179+ if (grsec_enable_audit_ptrace)
54180+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54181+#endif
54182+ return;
54183+}
54184diff -urNp linux-2.6.32.46/grsecurity/grsec_sig.c linux-2.6.32.46/grsecurity/grsec_sig.c
54185--- linux-2.6.32.46/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54186+++ linux-2.6.32.46/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54187@@ -0,0 +1,205 @@
54188+#include <linux/kernel.h>
54189+#include <linux/sched.h>
54190+#include <linux/delay.h>
54191+#include <linux/grsecurity.h>
54192+#include <linux/grinternal.h>
54193+#include <linux/hardirq.h>
54194+
54195+char *signames[] = {
54196+ [SIGSEGV] = "Segmentation fault",
54197+ [SIGILL] = "Illegal instruction",
54198+ [SIGABRT] = "Abort",
54199+ [SIGBUS] = "Invalid alignment/Bus error"
54200+};
54201+
54202+void
54203+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54204+{
54205+#ifdef CONFIG_GRKERNSEC_SIGNAL
54206+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54207+ (sig == SIGABRT) || (sig == SIGBUS))) {
54208+ if (t->pid == current->pid) {
54209+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54210+ } else {
54211+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54212+ }
54213+ }
54214+#endif
54215+ return;
54216+}
54217+
54218+int
54219+gr_handle_signal(const struct task_struct *p, const int sig)
54220+{
54221+#ifdef CONFIG_GRKERNSEC
54222+ if (current->pid > 1 && gr_check_protected_task(p)) {
54223+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54224+ return -EPERM;
54225+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54226+ return -EPERM;
54227+ }
54228+#endif
54229+ return 0;
54230+}
54231+
54232+#ifdef CONFIG_GRKERNSEC
54233+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54234+
54235+int gr_fake_force_sig(int sig, struct task_struct *t)
54236+{
54237+ unsigned long int flags;
54238+ int ret, blocked, ignored;
54239+ struct k_sigaction *action;
54240+
54241+ spin_lock_irqsave(&t->sighand->siglock, flags);
54242+ action = &t->sighand->action[sig-1];
54243+ ignored = action->sa.sa_handler == SIG_IGN;
54244+ blocked = sigismember(&t->blocked, sig);
54245+ if (blocked || ignored) {
54246+ action->sa.sa_handler = SIG_DFL;
54247+ if (blocked) {
54248+ sigdelset(&t->blocked, sig);
54249+ recalc_sigpending_and_wake(t);
54250+ }
54251+ }
54252+ if (action->sa.sa_handler == SIG_DFL)
54253+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54254+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54255+
54256+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54257+
54258+ return ret;
54259+}
54260+#endif
54261+
54262+#ifdef CONFIG_GRKERNSEC_BRUTE
54263+#define GR_USER_BAN_TIME (15 * 60)
54264+
54265+static int __get_dumpable(unsigned long mm_flags)
54266+{
54267+ int ret;
54268+
54269+ ret = mm_flags & MMF_DUMPABLE_MASK;
54270+ return (ret >= 2) ? 2 : ret;
54271+}
54272+#endif
54273+
54274+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54275+{
54276+#ifdef CONFIG_GRKERNSEC_BRUTE
54277+ uid_t uid = 0;
54278+
54279+ if (!grsec_enable_brute)
54280+ return;
54281+
54282+ rcu_read_lock();
54283+ read_lock(&tasklist_lock);
54284+ read_lock(&grsec_exec_file_lock);
54285+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54286+ p->real_parent->brute = 1;
54287+ else {
54288+ const struct cred *cred = __task_cred(p), *cred2;
54289+ struct task_struct *tsk, *tsk2;
54290+
54291+ if (!__get_dumpable(mm_flags) && cred->uid) {
54292+ struct user_struct *user;
54293+
54294+ uid = cred->uid;
54295+
54296+ /* this is put upon execution past expiration */
54297+ user = find_user(uid);
54298+ if (user == NULL)
54299+ goto unlock;
54300+ user->banned = 1;
54301+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54302+ if (user->ban_expires == ~0UL)
54303+ user->ban_expires--;
54304+
54305+ do_each_thread(tsk2, tsk) {
54306+ cred2 = __task_cred(tsk);
54307+ if (tsk != p && cred2->uid == uid)
54308+ gr_fake_force_sig(SIGKILL, tsk);
54309+ } while_each_thread(tsk2, tsk);
54310+ }
54311+ }
54312+unlock:
54313+ read_unlock(&grsec_exec_file_lock);
54314+ read_unlock(&tasklist_lock);
54315+ rcu_read_unlock();
54316+
54317+ if (uid)
54318+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54319+#endif
54320+ return;
54321+}
54322+
54323+void gr_handle_brute_check(void)
54324+{
54325+#ifdef CONFIG_GRKERNSEC_BRUTE
54326+ if (current->brute)
54327+ msleep(30 * 1000);
54328+#endif
54329+ return;
54330+}
54331+
54332+void gr_handle_kernel_exploit(void)
54333+{
54334+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54335+ const struct cred *cred;
54336+ struct task_struct *tsk, *tsk2;
54337+ struct user_struct *user;
54338+ uid_t uid;
54339+
54340+ if (in_irq() || in_serving_softirq() || in_nmi())
54341+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54342+
54343+ uid = current_uid();
54344+
54345+ if (uid == 0)
54346+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54347+ else {
54348+ /* kill all the processes of this user, hold a reference
54349+ to their creds struct, and prevent them from creating
54350+ another process until system reset
54351+ */
54352+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54353+ /* we intentionally leak this ref */
54354+ user = get_uid(current->cred->user);
54355+ if (user) {
54356+ user->banned = 1;
54357+ user->ban_expires = ~0UL;
54358+ }
54359+
54360+ read_lock(&tasklist_lock);
54361+ do_each_thread(tsk2, tsk) {
54362+ cred = __task_cred(tsk);
54363+ if (cred->uid == uid)
54364+ gr_fake_force_sig(SIGKILL, tsk);
54365+ } while_each_thread(tsk2, tsk);
54366+ read_unlock(&tasklist_lock);
54367+ }
54368+#endif
54369+}
54370+
54371+int __gr_process_user_ban(struct user_struct *user)
54372+{
54373+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54374+ if (unlikely(user->banned)) {
54375+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54376+ user->banned = 0;
54377+ user->ban_expires = 0;
54378+ free_uid(user);
54379+ } else
54380+ return -EPERM;
54381+ }
54382+#endif
54383+ return 0;
54384+}
54385+
54386+int gr_process_user_ban(void)
54387+{
54388+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54389+ return __gr_process_user_ban(current->cred->user);
54390+#endif
54391+ return 0;
54392+}
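
The brute-force and kernel-lockout paths above share one piece of bookkeeping: a user is marked banned with either a fixed expiry (GR_USER_BAN_TIME) or an expiry of ~0UL meaning "until reboot", and __gr_process_user_ban clears the ban once the expiry has lapsed. A standalone sketch of that state machine, with illustrative struct and function names and wall-clock time in place of get_seconds():

#include <stdbool.h>
#include <time.h>

/* Sketch of the ban bookkeeping: field and function names are illustrative. */
struct banned_user {
    bool   banned;
    time_t ban_expires;          /* 0 = unset, (time_t)-1 = permanent */
};

#define BAN_TIME (15 * 60)       /* mirrors GR_USER_BAN_TIME */

static void ban_user(struct banned_user *u, bool permanent)
{
    u->banned = true;
    u->ban_expires = permanent ? (time_t)-1 : time(NULL) + BAN_TIME;
}

/* Returns -1 (deny) while the ban is active, 0 once it has lapsed. */
static int process_user_ban(struct banned_user *u)
{
    if (!u->banned)
        return 0;
    if (u->ban_expires != (time_t)-1 && time(NULL) >= u->ban_expires) {
        u->banned = false;       /* expired: lift the ban */
        u->ban_expires = 0;
        return 0;
    }
    return -1;
}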
54393diff -urNp linux-2.6.32.46/grsecurity/grsec_sock.c linux-2.6.32.46/grsecurity/grsec_sock.c
54394--- linux-2.6.32.46/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54395+++ linux-2.6.32.46/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54396@@ -0,0 +1,275 @@
54397+#include <linux/kernel.h>
54398+#include <linux/module.h>
54399+#include <linux/sched.h>
54400+#include <linux/file.h>
54401+#include <linux/net.h>
54402+#include <linux/in.h>
54403+#include <linux/ip.h>
54404+#include <net/sock.h>
54405+#include <net/inet_sock.h>
54406+#include <linux/grsecurity.h>
54407+#include <linux/grinternal.h>
54408+#include <linux/gracl.h>
54409+
54410+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54411+EXPORT_SYMBOL(gr_cap_rtnetlink);
54412+
54413+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54414+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54415+
54416+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54417+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54418+
54419+#ifdef CONFIG_UNIX_MODULE
54420+EXPORT_SYMBOL(gr_acl_handle_unix);
54421+EXPORT_SYMBOL(gr_acl_handle_mknod);
54422+EXPORT_SYMBOL(gr_handle_chroot_unix);
54423+EXPORT_SYMBOL(gr_handle_create);
54424+#endif
54425+
54426+#ifdef CONFIG_GRKERNSEC
54427+#define gr_conn_table_size 32749
54428+struct conn_table_entry {
54429+ struct conn_table_entry *next;
54430+ struct signal_struct *sig;
54431+};
54432+
54433+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54434+DEFINE_SPINLOCK(gr_conn_table_lock);
54435+
54436+extern const char * gr_socktype_to_name(unsigned char type);
54437+extern const char * gr_proto_to_name(unsigned char proto);
54438+extern const char * gr_sockfamily_to_name(unsigned char family);
54439+
54440+static __inline__ int
54441+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54442+{
54443+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54444+}
54445+
54446+static __inline__ int
54447+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54448+ __u16 sport, __u16 dport)
54449+{
54450+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54451+ sig->gr_sport == sport && sig->gr_dport == dport))
54452+ return 1;
54453+ else
54454+ return 0;
54455+}
54456+
54457+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54458+{
54459+ struct conn_table_entry **match;
54460+ unsigned int index;
54461+
54462+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54463+ sig->gr_sport, sig->gr_dport,
54464+ gr_conn_table_size);
54465+
54466+ newent->sig = sig;
54467+
54468+ match = &gr_conn_table[index];
54469+ newent->next = *match;
54470+ *match = newent;
54471+
54472+ return;
54473+}
54474+
54475+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54476+{
54477+ struct conn_table_entry *match, *last = NULL;
54478+ unsigned int index;
54479+
54480+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54481+ sig->gr_sport, sig->gr_dport,
54482+ gr_conn_table_size);
54483+
54484+ match = gr_conn_table[index];
54485+ while (match && !conn_match(match->sig,
54486+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54487+ sig->gr_dport)) {
54488+ last = match;
54489+ match = match->next;
54490+ }
54491+
54492+ if (match) {
54493+ if (last)
54494+ last->next = match->next;
54495+ else
54496+ gr_conn_table[index] = NULL;
54497+ kfree(match);
54498+ }
54499+
54500+ return;
54501+}
54502+
54503+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54504+ __u16 sport, __u16 dport)
54505+{
54506+ struct conn_table_entry *match;
54507+ unsigned int index;
54508+
54509+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54510+
54511+ match = gr_conn_table[index];
54512+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54513+ match = match->next;
54514+
54515+ if (match)
54516+ return match->sig;
54517+ else
54518+ return NULL;
54519+}
54520+
54521+#endif
54522+
54523+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54524+{
54525+#ifdef CONFIG_GRKERNSEC
54526+ struct signal_struct *sig = task->signal;
54527+ struct conn_table_entry *newent;
54528+
54529+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54530+ if (newent == NULL)
54531+ return;
54532+ /* no bh lock needed since we are called with bh disabled */
54533+ spin_lock(&gr_conn_table_lock);
54534+ gr_del_task_from_ip_table_nolock(sig);
54535+ sig->gr_saddr = inet->rcv_saddr;
54536+ sig->gr_daddr = inet->daddr;
54537+ sig->gr_sport = inet->sport;
54538+ sig->gr_dport = inet->dport;
54539+ gr_add_to_task_ip_table_nolock(sig, newent);
54540+ spin_unlock(&gr_conn_table_lock);
54541+#endif
54542+ return;
54543+}
54544+
54545+void gr_del_task_from_ip_table(struct task_struct *task)
54546+{
54547+#ifdef CONFIG_GRKERNSEC
54548+ spin_lock_bh(&gr_conn_table_lock);
54549+ gr_del_task_from_ip_table_nolock(task->signal);
54550+ spin_unlock_bh(&gr_conn_table_lock);
54551+#endif
54552+ return;
54553+}
54554+
54555+void
54556+gr_attach_curr_ip(const struct sock *sk)
54557+{
54558+#ifdef CONFIG_GRKERNSEC
54559+ struct signal_struct *p, *set;
54560+ const struct inet_sock *inet = inet_sk(sk);
54561+
54562+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
54563+ return;
54564+
54565+ set = current->signal;
54566+
54567+ spin_lock_bh(&gr_conn_table_lock);
54568+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
54569+ inet->dport, inet->sport);
54570+ if (unlikely(p != NULL)) {
54571+ set->curr_ip = p->curr_ip;
54572+ set->used_accept = 1;
54573+ gr_del_task_from_ip_table_nolock(p);
54574+ spin_unlock_bh(&gr_conn_table_lock);
54575+ return;
54576+ }
54577+ spin_unlock_bh(&gr_conn_table_lock);
54578+
54579+ set->curr_ip = inet->daddr;
54580+ set->used_accept = 1;
54581+#endif
54582+ return;
54583+}
54584+
54585+int
54586+gr_handle_sock_all(const int family, const int type, const int protocol)
54587+{
54588+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54589+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
54590+ (family != AF_UNIX)) {
54591+ if (family == AF_INET)
54592+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
54593+ else
54594+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
54595+ return -EACCES;
54596+ }
54597+#endif
54598+ return 0;
54599+}
54600+
54601+int
54602+gr_handle_sock_server(const struct sockaddr *sck)
54603+{
54604+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54605+ if (grsec_enable_socket_server &&
54606+ in_group_p(grsec_socket_server_gid) &&
54607+ sck && (sck->sa_family != AF_UNIX) &&
54608+ (sck->sa_family != AF_LOCAL)) {
54609+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54610+ return -EACCES;
54611+ }
54612+#endif
54613+ return 0;
54614+}
54615+
54616+int
54617+gr_handle_sock_server_other(const struct sock *sck)
54618+{
54619+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54620+ if (grsec_enable_socket_server &&
54621+ in_group_p(grsec_socket_server_gid) &&
54622+ sck && (sck->sk_family != AF_UNIX) &&
54623+ (sck->sk_family != AF_LOCAL)) {
54624+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
54625+ return -EACCES;
54626+ }
54627+#endif
54628+ return 0;
54629+}
54630+
54631+int
54632+gr_handle_sock_client(const struct sockaddr *sck)
54633+{
54634+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54635+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
54636+ sck && (sck->sa_family != AF_UNIX) &&
54637+ (sck->sa_family != AF_LOCAL)) {
54638+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
54639+ return -EACCES;
54640+ }
54641+#endif
54642+ return 0;
54643+}
54644+
54645+kernel_cap_t
54646+gr_cap_rtnetlink(struct sock *sock)
54647+{
54648+#ifdef CONFIG_GRKERNSEC
54649+ if (!gr_acl_is_enabled())
54650+ return current_cap();
54651+ else if (sock->sk_protocol == NETLINK_ISCSI &&
54652+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
54653+ gr_is_capable(CAP_SYS_ADMIN))
54654+ return current_cap();
54655+ else if (sock->sk_protocol == NETLINK_AUDIT &&
54656+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
54657+ gr_is_capable(CAP_AUDIT_WRITE) &&
54658+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
54659+ gr_is_capable(CAP_AUDIT_CONTROL))
54660+ return current_cap();
54661+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
54662+ ((sock->sk_protocol == NETLINK_ROUTE) ?
54663+ gr_is_capable_nolog(CAP_NET_ADMIN) :
54664+ gr_is_capable(CAP_NET_ADMIN)))
54665+ return current_cap();
54666+ else
54667+ return __cap_empty_set;
54668+#else
54669+ return current_cap();
54670+#endif
54671+}
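
conn_hash() above folds the TCP 4-tuple into a bucket index over a prime-sized table so that the task accepting a connection can look up and inherit the source address recorded by the task that created it. A standalone rendering of the same hash with a throwaway main(); the example addresses and the use of host byte order are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 32749   /* same prime bucket count as gr_conn_table_size */

/* Same mixing as conn_hash() in the patch: fold the 4-tuple into one word
 * and reduce it modulo the table size. */
static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
{
    return (daddr + saddr + ((uint32_t)sport << 8) + ((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
    /* Example 4-tuple: 192.168.1.10:40000 -> 10.0.0.1:80.  The kernel code
     * simply hashes whatever values it is handed. */
    uint32_t saddr = (192u << 24) | (168u << 16) | (1u << 8) | 10u;
    uint32_t daddr = (10u << 24) | 1u;

    printf("bucket = %u\n", conn_hash(saddr, daddr, 40000, 80));
    return 0;
}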
54672diff -urNp linux-2.6.32.46/grsecurity/grsec_sysctl.c linux-2.6.32.46/grsecurity/grsec_sysctl.c
54673--- linux-2.6.32.46/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
54674+++ linux-2.6.32.46/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
54675@@ -0,0 +1,479 @@
54676+#include <linux/kernel.h>
54677+#include <linux/sched.h>
54678+#include <linux/sysctl.h>
54679+#include <linux/grsecurity.h>
54680+#include <linux/grinternal.h>
54681+
54682+int
54683+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
54684+{
54685+#ifdef CONFIG_GRKERNSEC_SYSCTL
54686+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
54687+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
54688+ return -EACCES;
54689+ }
54690+#endif
54691+ return 0;
54692+}
54693+
54694+#ifdef CONFIG_GRKERNSEC_ROFS
54695+static int __maybe_unused one = 1;
54696+#endif
54697+
54698+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
54699+ctl_table grsecurity_table[] = {
54700+#ifdef CONFIG_GRKERNSEC_SYSCTL
54701+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
54702+#ifdef CONFIG_GRKERNSEC_IO
54703+ {
54704+ .ctl_name = CTL_UNNUMBERED,
54705+ .procname = "disable_priv_io",
54706+ .data = &grsec_disable_privio,
54707+ .maxlen = sizeof(int),
54708+ .mode = 0600,
54709+ .proc_handler = &proc_dointvec,
54710+ },
54711+#endif
54712+#endif
54713+#ifdef CONFIG_GRKERNSEC_LINK
54714+ {
54715+ .ctl_name = CTL_UNNUMBERED,
54716+ .procname = "linking_restrictions",
54717+ .data = &grsec_enable_link,
54718+ .maxlen = sizeof(int),
54719+ .mode = 0600,
54720+ .proc_handler = &proc_dointvec,
54721+ },
54722+#endif
54723+#ifdef CONFIG_GRKERNSEC_BRUTE
54724+ {
54725+ .ctl_name = CTL_UNNUMBERED,
54726+ .procname = "deter_bruteforce",
54727+ .data = &grsec_enable_brute,
54728+ .maxlen = sizeof(int),
54729+ .mode = 0600,
54730+ .proc_handler = &proc_dointvec,
54731+ },
54732+#endif
54733+#ifdef CONFIG_GRKERNSEC_FIFO
54734+ {
54735+ .ctl_name = CTL_UNNUMBERED,
54736+ .procname = "fifo_restrictions",
54737+ .data = &grsec_enable_fifo,
54738+ .maxlen = sizeof(int),
54739+ .mode = 0600,
54740+ .proc_handler = &proc_dointvec,
54741+ },
54742+#endif
54743+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54744+ {
54745+ .ctl_name = CTL_UNNUMBERED,
54746+ .procname = "ip_blackhole",
54747+ .data = &grsec_enable_blackhole,
54748+ .maxlen = sizeof(int),
54749+ .mode = 0600,
54750+ .proc_handler = &proc_dointvec,
54751+ },
54752+ {
54753+ .ctl_name = CTL_UNNUMBERED,
54754+ .procname = "lastack_retries",
54755+ .data = &grsec_lastack_retries,
54756+ .maxlen = sizeof(int),
54757+ .mode = 0600,
54758+ .proc_handler = &proc_dointvec,
54759+ },
54760+#endif
54761+#ifdef CONFIG_GRKERNSEC_EXECLOG
54762+ {
54763+ .ctl_name = CTL_UNNUMBERED,
54764+ .procname = "exec_logging",
54765+ .data = &grsec_enable_execlog,
54766+ .maxlen = sizeof(int),
54767+ .mode = 0600,
54768+ .proc_handler = &proc_dointvec,
54769+ },
54770+#endif
54771+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54772+ {
54773+ .ctl_name = CTL_UNNUMBERED,
54774+ .procname = "rwxmap_logging",
54775+ .data = &grsec_enable_log_rwxmaps,
54776+ .maxlen = sizeof(int),
54777+ .mode = 0600,
54778+ .proc_handler = &proc_dointvec,
54779+ },
54780+#endif
54781+#ifdef CONFIG_GRKERNSEC_SIGNAL
54782+ {
54783+ .ctl_name = CTL_UNNUMBERED,
54784+ .procname = "signal_logging",
54785+ .data = &grsec_enable_signal,
54786+ .maxlen = sizeof(int),
54787+ .mode = 0600,
54788+ .proc_handler = &proc_dointvec,
54789+ },
54790+#endif
54791+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54792+ {
54793+ .ctl_name = CTL_UNNUMBERED,
54794+ .procname = "forkfail_logging",
54795+ .data = &grsec_enable_forkfail,
54796+ .maxlen = sizeof(int),
54797+ .mode = 0600,
54798+ .proc_handler = &proc_dointvec,
54799+ },
54800+#endif
54801+#ifdef CONFIG_GRKERNSEC_TIME
54802+ {
54803+ .ctl_name = CTL_UNNUMBERED,
54804+ .procname = "timechange_logging",
54805+ .data = &grsec_enable_time,
54806+ .maxlen = sizeof(int),
54807+ .mode = 0600,
54808+ .proc_handler = &proc_dointvec,
54809+ },
54810+#endif
54811+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54812+ {
54813+ .ctl_name = CTL_UNNUMBERED,
54814+ .procname = "chroot_deny_shmat",
54815+ .data = &grsec_enable_chroot_shmat,
54816+ .maxlen = sizeof(int),
54817+ .mode = 0600,
54818+ .proc_handler = &proc_dointvec,
54819+ },
54820+#endif
54821+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54822+ {
54823+ .ctl_name = CTL_UNNUMBERED,
54824+ .procname = "chroot_deny_unix",
54825+ .data = &grsec_enable_chroot_unix,
54826+ .maxlen = sizeof(int),
54827+ .mode = 0600,
54828+ .proc_handler = &proc_dointvec,
54829+ },
54830+#endif
54831+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54832+ {
54833+ .ctl_name = CTL_UNNUMBERED,
54834+ .procname = "chroot_deny_mount",
54835+ .data = &grsec_enable_chroot_mount,
54836+ .maxlen = sizeof(int),
54837+ .mode = 0600,
54838+ .proc_handler = &proc_dointvec,
54839+ },
54840+#endif
54841+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54842+ {
54843+ .ctl_name = CTL_UNNUMBERED,
54844+ .procname = "chroot_deny_fchdir",
54845+ .data = &grsec_enable_chroot_fchdir,
54846+ .maxlen = sizeof(int),
54847+ .mode = 0600,
54848+ .proc_handler = &proc_dointvec,
54849+ },
54850+#endif
54851+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54852+ {
54853+ .ctl_name = CTL_UNNUMBERED,
54854+ .procname = "chroot_deny_chroot",
54855+ .data = &grsec_enable_chroot_double,
54856+ .maxlen = sizeof(int),
54857+ .mode = 0600,
54858+ .proc_handler = &proc_dointvec,
54859+ },
54860+#endif
54861+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54862+ {
54863+ .ctl_name = CTL_UNNUMBERED,
54864+ .procname = "chroot_deny_pivot",
54865+ .data = &grsec_enable_chroot_pivot,
54866+ .maxlen = sizeof(int),
54867+ .mode = 0600,
54868+ .proc_handler = &proc_dointvec,
54869+ },
54870+#endif
54871+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54872+ {
54873+ .ctl_name = CTL_UNNUMBERED,
54874+ .procname = "chroot_enforce_chdir",
54875+ .data = &grsec_enable_chroot_chdir,
54876+ .maxlen = sizeof(int),
54877+ .mode = 0600,
54878+ .proc_handler = &proc_dointvec,
54879+ },
54880+#endif
54881+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54882+ {
54883+ .ctl_name = CTL_UNNUMBERED,
54884+ .procname = "chroot_deny_chmod",
54885+ .data = &grsec_enable_chroot_chmod,
54886+ .maxlen = sizeof(int),
54887+ .mode = 0600,
54888+ .proc_handler = &proc_dointvec,
54889+ },
54890+#endif
54891+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54892+ {
54893+ .ctl_name = CTL_UNNUMBERED,
54894+ .procname = "chroot_deny_mknod",
54895+ .data = &grsec_enable_chroot_mknod,
54896+ .maxlen = sizeof(int),
54897+ .mode = 0600,
54898+ .proc_handler = &proc_dointvec,
54899+ },
54900+#endif
54901+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54902+ {
54903+ .ctl_name = CTL_UNNUMBERED,
54904+ .procname = "chroot_restrict_nice",
54905+ .data = &grsec_enable_chroot_nice,
54906+ .maxlen = sizeof(int),
54907+ .mode = 0600,
54908+ .proc_handler = &proc_dointvec,
54909+ },
54910+#endif
54911+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54912+ {
54913+ .ctl_name = CTL_UNNUMBERED,
54914+ .procname = "chroot_execlog",
54915+ .data = &grsec_enable_chroot_execlog,
54916+ .maxlen = sizeof(int),
54917+ .mode = 0600,
54918+ .proc_handler = &proc_dointvec,
54919+ },
54920+#endif
54921+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54922+ {
54923+ .ctl_name = CTL_UNNUMBERED,
54924+ .procname = "chroot_caps",
54925+ .data = &grsec_enable_chroot_caps,
54926+ .maxlen = sizeof(int),
54927+ .mode = 0600,
54928+ .proc_handler = &proc_dointvec,
54929+ },
54930+#endif
54931+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54932+ {
54933+ .ctl_name = CTL_UNNUMBERED,
54934+ .procname = "chroot_deny_sysctl",
54935+ .data = &grsec_enable_chroot_sysctl,
54936+ .maxlen = sizeof(int),
54937+ .mode = 0600,
54938+ .proc_handler = &proc_dointvec,
54939+ },
54940+#endif
54941+#ifdef CONFIG_GRKERNSEC_TPE
54942+ {
54943+ .ctl_name = CTL_UNNUMBERED,
54944+ .procname = "tpe",
54945+ .data = &grsec_enable_tpe,
54946+ .maxlen = sizeof(int),
54947+ .mode = 0600,
54948+ .proc_handler = &proc_dointvec,
54949+ },
54950+ {
54951+ .ctl_name = CTL_UNNUMBERED,
54952+ .procname = "tpe_gid",
54953+ .data = &grsec_tpe_gid,
54954+ .maxlen = sizeof(int),
54955+ .mode = 0600,
54956+ .proc_handler = &proc_dointvec,
54957+ },
54958+#endif
54959+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54960+ {
54961+ .ctl_name = CTL_UNNUMBERED,
54962+ .procname = "tpe_invert",
54963+ .data = &grsec_enable_tpe_invert,
54964+ .maxlen = sizeof(int),
54965+ .mode = 0600,
54966+ .proc_handler = &proc_dointvec,
54967+ },
54968+#endif
54969+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54970+ {
54971+ .ctl_name = CTL_UNNUMBERED,
54972+ .procname = "tpe_restrict_all",
54973+ .data = &grsec_enable_tpe_all,
54974+ .maxlen = sizeof(int),
54975+ .mode = 0600,
54976+ .proc_handler = &proc_dointvec,
54977+ },
54978+#endif
54979+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54980+ {
54981+ .ctl_name = CTL_UNNUMBERED,
54982+ .procname = "socket_all",
54983+ .data = &grsec_enable_socket_all,
54984+ .maxlen = sizeof(int),
54985+ .mode = 0600,
54986+ .proc_handler = &proc_dointvec,
54987+ },
54988+ {
54989+ .ctl_name = CTL_UNNUMBERED,
54990+ .procname = "socket_all_gid",
54991+ .data = &grsec_socket_all_gid,
54992+ .maxlen = sizeof(int),
54993+ .mode = 0600,
54994+ .proc_handler = &proc_dointvec,
54995+ },
54996+#endif
54997+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54998+ {
54999+ .ctl_name = CTL_UNNUMBERED,
55000+ .procname = "socket_client",
55001+ .data = &grsec_enable_socket_client,
55002+ .maxlen = sizeof(int),
55003+ .mode = 0600,
55004+ .proc_handler = &proc_dointvec,
55005+ },
55006+ {
55007+ .ctl_name = CTL_UNNUMBERED,
55008+ .procname = "socket_client_gid",
55009+ .data = &grsec_socket_client_gid,
55010+ .maxlen = sizeof(int),
55011+ .mode = 0600,
55012+ .proc_handler = &proc_dointvec,
55013+ },
55014+#endif
55015+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55016+ {
55017+ .ctl_name = CTL_UNNUMBERED,
55018+ .procname = "socket_server",
55019+ .data = &grsec_enable_socket_server,
55020+ .maxlen = sizeof(int),
55021+ .mode = 0600,
55022+ .proc_handler = &proc_dointvec,
55023+ },
55024+ {
55025+ .ctl_name = CTL_UNNUMBERED,
55026+ .procname = "socket_server_gid",
55027+ .data = &grsec_socket_server_gid,
55028+ .maxlen = sizeof(int),
55029+ .mode = 0600,
55030+ .proc_handler = &proc_dointvec,
55031+ },
55032+#endif
55033+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55034+ {
55035+ .ctl_name = CTL_UNNUMBERED,
55036+ .procname = "audit_group",
55037+ .data = &grsec_enable_group,
55038+ .maxlen = sizeof(int),
55039+ .mode = 0600,
55040+ .proc_handler = &proc_dointvec,
55041+ },
55042+ {
55043+ .ctl_name = CTL_UNNUMBERED,
55044+ .procname = "audit_gid",
55045+ .data = &grsec_audit_gid,
55046+ .maxlen = sizeof(int),
55047+ .mode = 0600,
55048+ .proc_handler = &proc_dointvec,
55049+ },
55050+#endif
55051+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55052+ {
55053+ .ctl_name = CTL_UNNUMBERED,
55054+ .procname = "audit_chdir",
55055+ .data = &grsec_enable_chdir,
55056+ .maxlen = sizeof(int),
55057+ .mode = 0600,
55058+ .proc_handler = &proc_dointvec,
55059+ },
55060+#endif
55061+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55062+ {
55063+ .ctl_name = CTL_UNNUMBERED,
55064+ .procname = "audit_mount",
55065+ .data = &grsec_enable_mount,
55066+ .maxlen = sizeof(int),
55067+ .mode = 0600,
55068+ .proc_handler = &proc_dointvec,
55069+ },
55070+#endif
55071+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55072+ {
55073+ .ctl_name = CTL_UNNUMBERED,
55074+ .procname = "audit_textrel",
55075+ .data = &grsec_enable_audit_textrel,
55076+ .maxlen = sizeof(int),
55077+ .mode = 0600,
55078+ .proc_handler = &proc_dointvec,
55079+ },
55080+#endif
55081+#ifdef CONFIG_GRKERNSEC_DMESG
55082+ {
55083+ .ctl_name = CTL_UNNUMBERED,
55084+ .procname = "dmesg",
55085+ .data = &grsec_enable_dmesg,
55086+ .maxlen = sizeof(int),
55087+ .mode = 0600,
55088+ .proc_handler = &proc_dointvec,
55089+ },
55090+#endif
55091+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55092+ {
55093+ .ctl_name = CTL_UNNUMBERED,
55094+ .procname = "chroot_findtask",
55095+ .data = &grsec_enable_chroot_findtask,
55096+ .maxlen = sizeof(int),
55097+ .mode = 0600,
55098+ .proc_handler = &proc_dointvec,
55099+ },
55100+#endif
55101+#ifdef CONFIG_GRKERNSEC_RESLOG
55102+ {
55103+ .ctl_name = CTL_UNNUMBERED,
55104+ .procname = "resource_logging",
55105+ .data = &grsec_resource_logging,
55106+ .maxlen = sizeof(int),
55107+ .mode = 0600,
55108+ .proc_handler = &proc_dointvec,
55109+ },
55110+#endif
55111+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55112+ {
55113+ .ctl_name = CTL_UNNUMBERED,
55114+ .procname = "audit_ptrace",
55115+ .data = &grsec_enable_audit_ptrace,
55116+ .maxlen = sizeof(int),
55117+ .mode = 0600,
55118+ .proc_handler = &proc_dointvec,
55119+ },
55120+#endif
55121+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55122+ {
55123+ .ctl_name = CTL_UNNUMBERED,
55124+ .procname = "harden_ptrace",
55125+ .data = &grsec_enable_harden_ptrace,
55126+ .maxlen = sizeof(int),
55127+ .mode = 0600,
55128+ .proc_handler = &proc_dointvec,
55129+ },
55130+#endif
55131+ {
55132+ .ctl_name = CTL_UNNUMBERED,
55133+ .procname = "grsec_lock",
55134+ .data = &grsec_lock,
55135+ .maxlen = sizeof(int),
55136+ .mode = 0600,
55137+ .proc_handler = &proc_dointvec,
55138+ },
55139+#endif
55140+#ifdef CONFIG_GRKERNSEC_ROFS
55141+ {
55142+ .ctl_name = CTL_UNNUMBERED,
55143+ .procname = "romount_protect",
55144+ .data = &grsec_enable_rofs,
55145+ .maxlen = sizeof(int),
55146+ .mode = 0600,
55147+ .proc_handler = &proc_dointvec_minmax,
55148+ .extra1 = &one,
55149+ .extra2 = &one,
55150+ },
55151+#endif
55152+ { .ctl_name = 0 }
55153+};
55154+#endif
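
Every entry in grsecurity_table above is a plain int toggled through proc_dointvec, and gr_handle_sysctl_mod refuses further writes into the grsecurity directory once grsec_lock is set. The sketch below flips one toggle from userspace and then sets the lock; the /proc/sys/kernel/grsecurity/ location is an assumption about where this table ends up registered, as that registration is not shown in this hunk, and the program naturally needs root and a grsecurity kernel.

#include <stdio.h>

/* Write a value string into a sysctl file; returns 0 on success. */
static int write_sysctl(const char *path, const char *value)
{
    FILE *f = fopen(path, "w");

    if (!f)
        return -1;
    fputs(value, f);
    return fclose(f);
}

int main(void)
{
    write_sysctl("/proc/sys/kernel/grsecurity/dmesg", "1");      /* enable a toggle   */
    write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1"); /* then lock the table */
    return 0;
}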
55155diff -urNp linux-2.6.32.46/grsecurity/grsec_time.c linux-2.6.32.46/grsecurity/grsec_time.c
55156--- linux-2.6.32.46/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55157+++ linux-2.6.32.46/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55158@@ -0,0 +1,16 @@
55159+#include <linux/kernel.h>
55160+#include <linux/sched.h>
55161+#include <linux/grinternal.h>
55162+#include <linux/module.h>
55163+
55164+void
55165+gr_log_timechange(void)
55166+{
55167+#ifdef CONFIG_GRKERNSEC_TIME
55168+ if (grsec_enable_time)
55169+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55170+#endif
55171+ return;
55172+}
55173+
55174+EXPORT_SYMBOL(gr_log_timechange);
55175diff -urNp linux-2.6.32.46/grsecurity/grsec_tpe.c linux-2.6.32.46/grsecurity/grsec_tpe.c
55176--- linux-2.6.32.46/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55177+++ linux-2.6.32.46/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55178@@ -0,0 +1,39 @@
55179+#include <linux/kernel.h>
55180+#include <linux/sched.h>
55181+#include <linux/file.h>
55182+#include <linux/fs.h>
55183+#include <linux/grinternal.h>
55184+
55185+extern int gr_acl_tpe_check(void);
55186+
55187+int
55188+gr_tpe_allow(const struct file *file)
55189+{
55190+#ifdef CONFIG_GRKERNSEC
55191+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55192+ const struct cred *cred = current_cred();
55193+
55194+ if (cred->uid && ((grsec_enable_tpe &&
55195+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55196+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55197+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55198+#else
55199+ in_group_p(grsec_tpe_gid)
55200+#endif
55201+ ) || gr_acl_tpe_check()) &&
55202+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55203+ (inode->i_mode & S_IWOTH))))) {
55204+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55205+ return 0;
55206+ }
55207+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55208+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55209+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55210+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55211+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55212+ return 0;
55213+ }
55214+#endif
55215+#endif
55216+ return 1;
55217+}
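
The object gr_tpe_allow inspects is the parent directory of the binary (d_parent->d_inode): execution is refused for a non-root user to whom TPE applies when that directory is not owned by root or is group/world-writable, and with TPE_ALL additionally when the directory is not owned by the user or is group/world-writable. A userspace sketch of the same directory test; the tpe_applies flag stands in for the gid / inverted-gid / RBAC checks and the function name is illustrative.

#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

/* Sketch of the gr_tpe_allow() directory test over stat(2) data. */
static bool tpe_would_allow(const struct stat *dir, uid_t uid,
                            bool tpe_applies, bool tpe_all)
{
    if (uid == 0)
        return true;                               /* root is not restricted here */

    bool dir_untrusted = dir->st_uid != 0 ||       /* not owned by root ...       */
                         (dir->st_mode & (S_IWGRP | S_IWOTH)); /* ... or writable by group/other */

    if (tpe_applies && dir_untrusted)
        return false;

    if (tpe_all &&
        ((dir->st_uid && dir->st_uid != uid) ||    /* owned by someone else       */
         (dir->st_mode & (S_IWGRP | S_IWOTH))))    /* or group/world-writable     */
        return false;

    return true;
}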
55218diff -urNp linux-2.6.32.46/grsecurity/grsum.c linux-2.6.32.46/grsecurity/grsum.c
55219--- linux-2.6.32.46/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55220+++ linux-2.6.32.46/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55221@@ -0,0 +1,61 @@
55222+#include <linux/err.h>
55223+#include <linux/kernel.h>
55224+#include <linux/sched.h>
55225+#include <linux/mm.h>
55226+#include <linux/scatterlist.h>
55227+#include <linux/crypto.h>
55228+#include <linux/gracl.h>
55229+
55230+
55231+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55232+#error "crypto and sha256 must be built into the kernel"
55233+#endif
55234+
55235+int
55236+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55237+{
55238+ char *p;
55239+ struct crypto_hash *tfm;
55240+ struct hash_desc desc;
55241+ struct scatterlist sg;
55242+ unsigned char temp_sum[GR_SHA_LEN];
55243+ volatile int retval = 0;
55244+ volatile int dummy = 0;
55245+ unsigned int i;
55246+
55247+ sg_init_table(&sg, 1);
55248+
55249+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55250+ if (IS_ERR(tfm)) {
55251+ /* should never happen, since sha256 should be built in */
55252+ return 1;
55253+ }
55254+
55255+ desc.tfm = tfm;
55256+ desc.flags = 0;
55257+
55258+ crypto_hash_init(&desc);
55259+
55260+ p = salt;
55261+ sg_set_buf(&sg, p, GR_SALT_LEN);
55262+ crypto_hash_update(&desc, &sg, sg.length);
55263+
55264+ p = entry->pw;
55265+ sg_set_buf(&sg, p, strlen(p));
55266+
55267+ crypto_hash_update(&desc, &sg, sg.length);
55268+
55269+ crypto_hash_final(&desc, temp_sum);
55270+
55271+ memset(entry->pw, 0, GR_PW_LEN);
55272+
55273+ for (i = 0; i < GR_SHA_LEN; i++)
55274+ if (sum[i] != temp_sum[i])
55275+ retval = 1;
55276+ else
55277+ dummy = 1; // waste a cycle
55278+
55279+ crypto_free_hash(tfm);
55280+
55281+ return retval;
55282+}
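
The comparison loop at the end of chkpw() walks the whole digest and performs a dummy assignment on matching bytes, so an early mismatch does not shorten the run time and leak the length of the matching prefix. The sketch below expresses the same fixed-time idea in the more common accumulator form; it is an illustration of the technique, not code from the patch.

#include <stddef.h>

/* Compare two digests in time independent of where the first mismatch is.
 * Returns nonzero if they differ. */
static int digests_differ(const unsigned char *a, const unsigned char *b, size_t len)
{
    unsigned char acc = 0;
    size_t i;

    for (i = 0; i < len; i++)
        acc |= a[i] ^ b[i];   /* stays 0 only if every byte matches */

    return acc != 0;
}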
55283diff -urNp linux-2.6.32.46/grsecurity/Kconfig linux-2.6.32.46/grsecurity/Kconfig
55284--- linux-2.6.32.46/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55285+++ linux-2.6.32.46/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55286@@ -0,0 +1,1037 @@
55287+#
55288+# grsecurity configuration
55289+#
55290+
55291+menu "Grsecurity"
55292+
55293+config GRKERNSEC
55294+ bool "Grsecurity"
55295+ select CRYPTO
55296+ select CRYPTO_SHA256
55297+ help
55298+ If you say Y here, you will be able to configure many features
55299+ that will enhance the security of your system. It is highly
55300+ recommended that you say Y here and read through the help
55301+ for each option so that you fully understand the features and
55302+ can evaluate their usefulness for your machine.
55303+
55304+choice
55305+ prompt "Security Level"
55306+ depends on GRKERNSEC
55307+ default GRKERNSEC_CUSTOM
55308+
55309+config GRKERNSEC_LOW
55310+ bool "Low"
55311+ select GRKERNSEC_LINK
55312+ select GRKERNSEC_FIFO
55313+ select GRKERNSEC_RANDNET
55314+ select GRKERNSEC_DMESG
55315+ select GRKERNSEC_CHROOT
55316+ select GRKERNSEC_CHROOT_CHDIR
55317+
55318+ help
55319+ If you choose this option, several of the grsecurity options will
55320+ be enabled that will give you greater protection against a number
55321+ of attacks, while assuring that none of your software will have any
55322+ conflicts with the additional security measures. If you run a lot
55323+ of unusual software, or you are having problems with the higher
55324+ security levels, you should say Y here. With this option, the
55325+ following features are enabled:
55326+
55327+ - Linking restrictions
55328+ - FIFO restrictions
55329+ - Restricted dmesg
55330+ - Enforced chdir("/") on chroot
55331+ - Runtime module disabling
55332+
55333+config GRKERNSEC_MEDIUM
55334+ bool "Medium"
55335+ select PAX
55336+ select PAX_EI_PAX
55337+ select PAX_PT_PAX_FLAGS
55338+ select PAX_HAVE_ACL_FLAGS
55339+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55340+ select GRKERNSEC_CHROOT
55341+ select GRKERNSEC_CHROOT_SYSCTL
55342+ select GRKERNSEC_LINK
55343+ select GRKERNSEC_FIFO
55344+ select GRKERNSEC_DMESG
55345+ select GRKERNSEC_RANDNET
55346+ select GRKERNSEC_FORKFAIL
55347+ select GRKERNSEC_TIME
55348+ select GRKERNSEC_SIGNAL
55349+ select GRKERNSEC_CHROOT
55350+ select GRKERNSEC_CHROOT_UNIX
55351+ select GRKERNSEC_CHROOT_MOUNT
55352+ select GRKERNSEC_CHROOT_PIVOT
55353+ select GRKERNSEC_CHROOT_DOUBLE
55354+ select GRKERNSEC_CHROOT_CHDIR
55355+ select GRKERNSEC_CHROOT_MKNOD
55356+ select GRKERNSEC_PROC
55357+ select GRKERNSEC_PROC_USERGROUP
55358+ select PAX_RANDUSTACK
55359+ select PAX_ASLR
55360+ select PAX_RANDMMAP
55361+ select PAX_REFCOUNT if (X86 || SPARC64)
55362+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55363+
55364+ help
55365+ If you say Y here, several features in addition to those included
55366+ in the low additional security level will be enabled. These
55367+ features provide even more security to your system, though in rare
55368+ cases they may be incompatible with very old or poorly written
55369+ software. If you enable this option, make sure that your auth
55370+ service (identd) is running as gid 1001. With this option,
55371+ the following features (in addition to those provided in the
55372+ low additional security level) will be enabled:
55373+
55374+ - Failed fork logging
55375+ - Time change logging
55376+ - Signal logging
55377+ - Deny mounts in chroot
55378+ - Deny double chrooting
55379+ - Deny sysctl writes in chroot
55380+ - Deny mknod in chroot
55381+ - Deny access to abstract AF_UNIX sockets out of chroot
55382+ - Deny pivot_root in chroot
55383+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55384+ - /proc restrictions with special GID set to 10 (usually wheel)
55385+ - Address Space Layout Randomization (ASLR)
55386+ - Prevent exploitation of most refcount overflows
55387+ - Bounds checking of copying between the kernel and userland
55388+
55389+config GRKERNSEC_HIGH
55390+ bool "High"
55391+ select GRKERNSEC_LINK
55392+ select GRKERNSEC_FIFO
55393+ select GRKERNSEC_DMESG
55394+ select GRKERNSEC_FORKFAIL
55395+ select GRKERNSEC_TIME
55396+ select GRKERNSEC_SIGNAL
55397+ select GRKERNSEC_CHROOT
55398+ select GRKERNSEC_CHROOT_SHMAT
55399+ select GRKERNSEC_CHROOT_UNIX
55400+ select GRKERNSEC_CHROOT_MOUNT
55401+ select GRKERNSEC_CHROOT_FCHDIR
55402+ select GRKERNSEC_CHROOT_PIVOT
55403+ select GRKERNSEC_CHROOT_DOUBLE
55404+ select GRKERNSEC_CHROOT_CHDIR
55405+ select GRKERNSEC_CHROOT_MKNOD
55406+ select GRKERNSEC_CHROOT_CAPS
55407+ select GRKERNSEC_CHROOT_SYSCTL
55408+ select GRKERNSEC_CHROOT_FINDTASK
55409+ select GRKERNSEC_SYSFS_RESTRICT
55410+ select GRKERNSEC_PROC
55411+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55412+ select GRKERNSEC_HIDESYM
55413+ select GRKERNSEC_BRUTE
55414+ select GRKERNSEC_PROC_USERGROUP
55415+ select GRKERNSEC_KMEM
55416+ select GRKERNSEC_RESLOG
55417+ select GRKERNSEC_RANDNET
55418+ select GRKERNSEC_PROC_ADD
55419+ select GRKERNSEC_CHROOT_CHMOD
55420+ select GRKERNSEC_CHROOT_NICE
55421+ select GRKERNSEC_AUDIT_MOUNT
55422+ select GRKERNSEC_MODHARDEN if (MODULES)
55423+ select GRKERNSEC_HARDEN_PTRACE
55424+ select GRKERNSEC_VM86 if (X86_32)
55425+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55426+ select PAX
55427+ select PAX_RANDUSTACK
55428+ select PAX_ASLR
55429+ select PAX_RANDMMAP
55430+ select PAX_NOEXEC
55431+ select PAX_MPROTECT
55432+ select PAX_EI_PAX
55433+ select PAX_PT_PAX_FLAGS
55434+ select PAX_HAVE_ACL_FLAGS
55435+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55436+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55437+ select PAX_RANDKSTACK if (X86_TSC && X86)
55438+ select PAX_SEGMEXEC if (X86_32)
55439+ select PAX_PAGEEXEC
55440+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55441+ select PAX_EMUTRAMP if (PARISC)
55442+ select PAX_EMUSIGRT if (PARISC)
55443+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55444+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55445+ select PAX_REFCOUNT if (X86 || SPARC64)
55446+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55447+ help
55448+ If you say Y here, many of the features of grsecurity will be
55449+ enabled, which will protect you against many kinds of attacks
55450+ against your system. The heightened security comes at a cost
55451+ of an increased chance of incompatibilities with rare software
55452+ on your machine. Since this security level enables PaX, you should
55453+ view <http://pax.grsecurity.net> and read about the PaX
55454+ project. While you are there, download chpax and run it on
55455+ binaries that cause problems with PaX. Also remember that
55456+ since the /proc restrictions are enabled, you must run your
55457+ identd as gid 1001. This security level enables the following
55458+ features in addition to those listed in the low and medium
55459+ security levels:
55460+
55461+ - Additional /proc restrictions
55462+ - Chmod restrictions in chroot
55463+ - No signals, ptrace, or viewing of processes outside of chroot
55464+ - Capability restrictions in chroot
55465+ - Deny fchdir out of chroot
55466+ - Priority restrictions in chroot
55467+ - Segmentation-based implementation of PaX
55468+ - Mprotect restrictions
55469+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55470+ - Kernel stack randomization
55471+ - Mount/unmount/remount logging
55472+ - Kernel symbol hiding
55473+ - Prevention of memory exhaustion-based exploits
55474+ - Hardening of module auto-loading
55475+ - Ptrace restrictions
55476+ - Restricted vm86 mode
55477+ - Restricted sysfs/debugfs
55478+ - Active kernel exploit response
55479+
55480+config GRKERNSEC_CUSTOM
55481+ bool "Custom"
55482+ help
55483+ If you say Y here, you will be able to configure every grsecurity
55484+ option, which allows you to enable many more features that aren't
55485+ covered in the basic security levels. These additional features
55486+ include TPE, socket restrictions, and the sysctl system for
55487+ grsecurity. It is advised that you read through the help for
55488+ each option to determine its usefulness in your situation.
55489+
55490+endchoice
55491+
55492+menu "Address Space Protection"
55493+depends on GRKERNSEC
55494+
55495+config GRKERNSEC_KMEM
55496+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55497+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55498+ help
55499+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55500+ be written to via mmap or otherwise to modify the running kernel.
55501+ /dev/port will also not be allowed to be opened. If you have module
55502+ support disabled, enabling this will close up four ways that are
55503+ currently used to insert malicious code into the running kernel.
55504+ Even with all these features enabled, we still highly recommend that
55505+ you use the RBAC system, as it is still possible for an attacker to
55506+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55507+ If you are not using XFree86, you may be able to stop this additional
55508+ case by enabling the 'Disable privileged I/O' option. Though nothing
55509+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55510+ but only to video memory, which is the only writing we allow in this
55511+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, it cannot
55512+ later be mprotected with PROT_WRITE.
55513+ It is highly recommended that you say Y here if you meet all the
55514+ conditions above.
55515+
55516+config GRKERNSEC_VM86
55517+ bool "Restrict VM86 mode"
55518+ depends on X86_32
55519+
55520+ help
55521+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55522+ make use of a special execution mode on 32bit x86 processors called
55523+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55524+ video cards and will still work with this option enabled. The purpose
55525+ of the option is to prevent exploitation of emulation errors in
55526+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55527+ Nearly all users should be able to enable this option.
55528+
55529+config GRKERNSEC_IO
55530+ bool "Disable privileged I/O"
55531+ depends on X86
55532+ select RTC_CLASS
55533+ select RTC_INTF_DEV
55534+ select RTC_DRV_CMOS
55535+
55536+ help
55537+ If you say Y here, all ioperm and iopl calls will return an error.
55538+ Ioperm and iopl can be used to modify the running kernel.
55539+ Unfortunately, some programs need this access to operate properly,
55540+ the most notable of which are XFree86 and hwclock. The hwclock case can
55541+ be remedied by building RTC support into the kernel, so real-time
55542+ clock support is selected when this option is enabled, to ensure
55543+ that hwclock operates correctly. XFree86 still will not
55544+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55545+ IF YOU USE XFree86. If you use XFree86 and you still want to
55546+ protect your kernel against modification, use the RBAC system.
55547+
55548+config GRKERNSEC_PROC_MEMMAP
55549+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55550+ default y if (PAX_NOEXEC || PAX_ASLR)
55551+ depends on PAX_NOEXEC || PAX_ASLR
55552+ help
55553+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55554+ give no information about the addresses of a task's mappings if
55555+ PaX features that rely on random addresses are enabled on the task.
55556+ If you use PaX it is greatly recommended that you say Y here as it
55557+ closes up a hole that makes the full ASLR useless for suid
55558+ binaries.
55559+
55560+config GRKERNSEC_BRUTE
55561+ bool "Deter exploit bruteforcing"
55562+ help
55563+ If you say Y here, attempts to bruteforce exploits against forking
55564+ daemons such as apache or sshd, as well as against suid/sgid binaries
55565+ will be deterred. When a child of a forking daemon is killed by PaX
55566+ or crashes due to an illegal instruction or other suspicious signal,
55567+ the parent process will be delayed 30 seconds upon every subsequent
55568+ fork until the administrator is able to assess the situation and
55569+ restart the daemon.
55570+ In the suid/sgid case, the attempt is logged, the user has all their
55571+ processes terminated, and they are prevented from executing any further
55572+ processes for 15 minutes.
55573+ It is recommended that you also enable signal logging in the auditing
55574+ section so that logs are generated when a process triggers a suspicious
55575+ signal.
55576+ If the sysctl option is enabled, a sysctl option with name
55577+ "deter_bruteforce" is created.
55578+
55579+config GRKERNSEC_MODHARDEN
55580+ bool "Harden module auto-loading"
55581+ depends on MODULES
55582+ help
55583+ If you say Y here, module auto-loading in response to use of some
55584+ feature implemented by an unloaded module will be restricted to
55585+ root users. Enabling this option helps defend against attacks
55586+ by unprivileged users who abuse the auto-loading behavior to
55587+ cause a vulnerable module to load that is then exploited.
55588+
55589+ If this option prevents a legitimate use of auto-loading for a
55590+ non-root user, the administrator can execute modprobe manually
55591+ with the exact name of the module mentioned in the alert log.
55592+ Alternatively, the administrator can add the module to the list
55593+ of modules loaded at boot by modifying init scripts.
55594+
55595+ Modification of init scripts will most likely be needed on
55596+ Ubuntu servers with encrypted home directory support enabled,
55597+ as the first non-root user logging in will cause the ecb(aes),
55598+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55599+
55600+config GRKERNSEC_HIDESYM
55601+ bool "Hide kernel symbols"
55602+ help
55603+ If you say Y here, getting information on loaded modules and
55604+ displaying all kernel symbols through a syscall will be restricted
55605+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55606+ /proc/kallsyms will be restricted to the root user. The RBAC
55607+ system can hide that entry even from root.
55608+
55609+ This option also prevents leaking of kernel addresses through
55610+ several /proc entries.
55611+
55612+ Note that this option is only effective provided the following
55613+ conditions are met:
55614+ 1) The kernel using grsecurity is not precompiled by some distribution
55615+ 2) You have also enabled GRKERNSEC_DMESG
55616+ 3) You are using the RBAC system and hiding other files such as your
55617+ kernel image and System.map. Alternatively, enabling this option
55618+ causes the permissions on /boot, /lib/modules, and the kernel
55619+ source directory to change at compile time to prevent
55620+ reading by non-root users.
55621+ If the above conditions are met, this option will aid in providing a
55622+ useful protection against local kernel exploitation of overflows
55623+ and arbitrary read/write vulnerabilities.
55624+
55625+config GRKERNSEC_KERN_LOCKOUT
55626+ bool "Active kernel exploit response"
55627+ depends on X86 || ARM || PPC || SPARC
55628+ help
55629+ If you say Y here, when a PaX alert is triggered due to suspicious
55630+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55631+ or an OOPS occurs due to bad memory accesses, instead of just
55632+ terminating the offending process (and potentially allowing
55633+ a subsequent exploit from the same user), we will take one of two
55634+ actions:
55635+ If the user was root, we will panic the system
55636+ If the user was non-root, we will log the attempt, terminate
55637+ all processes owned by the user, then prevent them from creating
55638+ any new processes until the system is restarted
55639+ This deters repeated kernel exploitation/bruteforcing attempts
55640+ and is useful for later forensics.
55641+
55642+endmenu
55643+menu "Role Based Access Control Options"
55644+depends on GRKERNSEC
55645+
55646+config GRKERNSEC_RBAC_DEBUG
55647+ bool
55648+
55649+config GRKERNSEC_NO_RBAC
55650+ bool "Disable RBAC system"
55651+ help
55652+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55653+ preventing the RBAC system from being enabled. You should only say Y
55654+ here if you have no intention of using the RBAC system, so as to prevent
55655+ an attacker with root access from misusing the RBAC system to hide files
55656+ and processes when loadable module support and /dev/[k]mem have been
55657+ locked down.
55658+
55659+config GRKERNSEC_ACL_HIDEKERN
55660+ bool "Hide kernel processes"
55661+ help
55662+ If you say Y here, all kernel threads will be hidden from all
55663+ processes except those whose subject has the "view hidden processes"
55664+ flag.
55665+
55666+config GRKERNSEC_ACL_MAXTRIES
55667+ int "Maximum tries before password lockout"
55668+ default 3
55669+ help
55670+ This option enforces the maximum number of times a user can attempt
55671+ to authorize themselves with the grsecurity RBAC system before being
55672+ denied the ability to attempt authorization again for a specified time.
55673+ The lower the number, the harder it will be to brute-force a password.
55674+
55675+config GRKERNSEC_ACL_TIMEOUT
55676+ int "Time to wait after max password tries, in seconds"
55677+ default 30
55678+ help
55679+ This option specifies the time the user must wait after attempting to
55680+ authorize to the RBAC system with the maximum number of invalid
55681+ passwords. The higher the number, the harder it will be to brute-force
55682+ a password.
55683+
55684+endmenu
55685+menu "Filesystem Protections"
55686+depends on GRKERNSEC
55687+
55688+config GRKERNSEC_PROC
55689+ bool "Proc restrictions"
55690+ help
55691+ If you say Y here, the permissions of the /proc filesystem
55692+ will be altered to enhance system security and privacy. You MUST
55693+ choose either a user only restriction or a user and group restriction.
55694+ Depending upon the option you choose, you can either restrict users to
55695+ seeing only the processes they themselves run ("restrict to user only"),
55696+ or choose a special group that can view all processes and files
55697+ normally restricted to root. NOTE: If you're running identd as
55698+ a non-root user, you will have to run it as the group you specify here.
55699+
55700+config GRKERNSEC_PROC_USER
55701+ bool "Restrict /proc to user only"
55702+ depends on GRKERNSEC_PROC
55703+ help
55704+ If you say Y here, non-root users will only be able to view their own
55705+ processes, and will be restricted from viewing network-related
55706+ information and kernel symbol and module information.
55707+
55708+config GRKERNSEC_PROC_USERGROUP
55709+ bool "Allow special group"
55710+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55711+ help
55712+ If you say Y here, you will be able to select a group that will be
55713+ able to view all processes and network-related information. If you've
55714+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55715+ remain hidden. This option is useful if you want to run identd as
55716+ a non-root user.
55717+
55718+config GRKERNSEC_PROC_GID
55719+ int "GID for special group"
55720+ depends on GRKERNSEC_PROC_USERGROUP
55721+ default 1001
55722+
55723+config GRKERNSEC_PROC_ADD
55724+ bool "Additional restrictions"
55725+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55726+ help
55727+ If you say Y here, additional restrictions will be placed on
55728+ /proc that keep normal users from viewing device information and
55729+ slabinfo information that could be useful for exploits.
55730+
55731+config GRKERNSEC_LINK
55732+ bool "Linking restrictions"
55733+ help
55734+ If you say Y here, /tmp race exploits will be prevented, since users
55735+ will no longer be able to follow symlinks owned by other users in
55736+ world-writable +t directories (e.g. /tmp), unless the owner of the
55737+ symlink is the owner of the directory. Users will also not be
55738+ able to hardlink to files they do not own. If the sysctl option is
55739+ enabled, a sysctl option with name "linking_restrictions" is created.
55740+
55741+config GRKERNSEC_FIFO
55742+ bool "FIFO restrictions"
55743+ help
55744+ If you say Y here, users will not be able to write to FIFOs they don't
55745+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55746+ the FIFO is the same as the owner of the directory it's held in. If the
55747+ sysctl
55747+ option is enabled, a sysctl option with name "fifo_restrictions" is
55748+ created.
55749+
55750+config GRKERNSEC_SYSFS_RESTRICT
55751+ bool "Sysfs/debugfs restriction"
55752+ depends on SYSFS
55753+ help
55754+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55755+ any filesystem normally mounted under it (e.g. debugfs) will only
55756+ be accessible by root. These filesystems generally provide access
55757+ to hardware and debug information that isn't appropriate for unprivileged
55758+ users of the system. Sysfs and debugfs have also become a large source
55759+ of new vulnerabilities, ranging from infoleaks to local compromise.
55760+ There has been very little oversight with an eye toward security involved
55761+ in adding new exporters of information to these filesystems, so their
55762+ use is discouraged.
55763+ This option is equivalent to a chmod 0700 of the mount paths.
55764+
55765+config GRKERNSEC_ROFS
55766+ bool "Runtime read-only mount protection"
55767+ help
55768+ If you say Y here, a sysctl option with name "romount_protect" will
55769+ be created. By setting this option to 1 at runtime, filesystems
55770+ will be protected in the following ways:
55771+ * No new writable mounts will be allowed
55772+ * Existing read-only mounts won't be able to be remounted read/write
55773+ * Write operations will be denied on all block devices
55774+ This option acts independently of grsec_lock: once it is set to 1,
55775+ it cannot be turned off. Therefore, please be mindful of the resulting
55776+ behavior if this option is enabled in an init script on a read-only
55777+ filesystem. This feature is mainly intended for secure embedded systems.
55778+
55779+config GRKERNSEC_CHROOT
55780+ bool "Chroot jail restrictions"
55781+ help
55782+ If you say Y here, you will be able to choose several options that will
55783+ make breaking out of a chrooted jail much more difficult. If you
55784+ encounter no software incompatibilities with the following options, it
55785+ is recommended that you enable each one.
55786+
55787+config GRKERNSEC_CHROOT_MOUNT
55788+ bool "Deny mounts"
55789+ depends on GRKERNSEC_CHROOT
55790+ help
55791+ If you say Y here, processes inside a chroot will not be able to
55792+ mount or remount filesystems. If the sysctl option is enabled, a
55793+ sysctl option with name "chroot_deny_mount" is created.
55794+
55795+config GRKERNSEC_CHROOT_DOUBLE
55796+ bool "Deny double-chroots"
55797+ depends on GRKERNSEC_CHROOT
55798+ help
55799+ If you say Y here, processes inside a chroot will not be able to chroot
55800+ again outside the chroot. This is a widely used method of breaking
55801+ out of a chroot jail and should not be allowed. If the sysctl
55802+ option is enabled, a sysctl option with name
55803+ "chroot_deny_chroot" is created.
55804+
55805+config GRKERNSEC_CHROOT_PIVOT
55806+ bool "Deny pivot_root in chroot"
55807+ depends on GRKERNSEC_CHROOT
55808+ help
55809+ If you say Y here, processes inside a chroot will not be able to use
55810+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55811+ works similarly to chroot in that it changes the root filesystem. This
55812+ function could be misused in a chrooted process to attempt to break out
55813+ of the chroot, and therefore should not be allowed. If the sysctl
55814+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55815+ created.
55816+
55817+config GRKERNSEC_CHROOT_CHDIR
55818+ bool "Enforce chdir(\"/\") on all chroots"
55819+ depends on GRKERNSEC_CHROOT
55820+ help
55821+ If you say Y here, the current working directory of all newly-chrooted
55822+ applications will be set to the root directory of the chroot.
55823+ The man page on chroot(2) states:
55824+ Note that this call does not change the current working
55825+ directory, so that `.' can be outside the tree rooted at
55826+ `/'. In particular, the super-user can escape from a
55827+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55828+
55829+ It is recommended that you say Y here, since it's not known to break
55830+ any software. If the sysctl option is enabled, a sysctl option with
55831+ name "chroot_enforce_chdir" is created.
55832+
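The escape sequence quoted from the man page can be sketched in C roughly
as follows (illustrative only, not part of the patch; the directory name
and loop count are arbitrary, and the calls require root/CAP_SYS_CHROOT).
With this option enabled, the chroot() call also moves the working
directory inside the new root, so the upward walk no longer escapes:

	#include <unistd.h>
	#include <sys/stat.h>

	int main(void)
	{
		int i;

		mkdir("foo", 0700);		/* mkdir foo */
		chroot("foo");			/* chroot foo: cwd is still outside */
		for (i = 0; i < 64; i++)	/* cd .. until above the old root */
			chdir("..");
		chroot(".");			/* re-root at the real / */
		return execl("/bin/sh", "sh", (char *)NULL);
	}
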
55833+config GRKERNSEC_CHROOT_CHMOD
55834+ bool "Deny (f)chmod +s"
55835+ depends on GRKERNSEC_CHROOT
55836+ help
55837+ If you say Y here, processes inside a chroot will not be able to chmod
55838+ or fchmod files to make them have suid or sgid bits. This protects
55839+ against another published method of breaking a chroot. If the sysctl
55840+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55841+ created.
55842+
55843+config GRKERNSEC_CHROOT_FCHDIR
55844+ bool "Deny fchdir out of chroot"
55845+ depends on GRKERNSEC_CHROOT
55846+ help
55847+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55848+ to a file descriptor of the chrooting process that points to a directory
55849+ outside the filesystem will be stopped. If the sysctl option
55850+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55851+
55852+config GRKERNSEC_CHROOT_MKNOD
55853+ bool "Deny mknod"
55854+ depends on GRKERNSEC_CHROOT
55855+ help
55856+ If you say Y here, processes inside a chroot will not be allowed to
55857+ mknod. The problem with using mknod inside a chroot is that it
55858+ would allow an attacker to create a device entry that is the same
55859+ as one on the physical root of your system, which could be
55860+ anything from the console device to a device for your hard drive (which
55861+ they could then use to wipe the drive or steal data). It is recommended
55862+ that you say Y here, unless you run into software incompatibilities.
55863+ If the sysctl option is enabled, a sysctl option with name
55864+ "chroot_deny_mknod" is created.
55865+
55866+config GRKERNSEC_CHROOT_SHMAT
55867+ bool "Deny shmat() out of chroot"
55868+ depends on GRKERNSEC_CHROOT
55869+ help
55870+ If you say Y here, processes inside a chroot will not be able to attach
55871+ to shared memory segments that were created outside of the chroot jail.
55872+ It is recommended that you say Y here. If the sysctl option is enabled,
55873+ a sysctl option with name "chroot_deny_shmat" is created.
55874+
55875+config GRKERNSEC_CHROOT_UNIX
55876+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55877+ depends on GRKERNSEC_CHROOT
55878+ help
55879+ If you say Y here, processes inside a chroot will not be able to
55880+ connect to abstract (meaning not belonging to a filesystem) Unix
55881+ domain sockets that were bound outside of a chroot. It is recommended
55882+ that you say Y here. If the sysctl option is enabled, a sysctl option
55883+ with name "chroot_deny_unix" is created.
55884+
55885+config GRKERNSEC_CHROOT_FINDTASK
55886+ bool "Protect outside processes"
55887+ depends on GRKERNSEC_CHROOT
55888+ help
55889+ If you say Y here, processes inside a chroot will not be able to
55890+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55891+ getsid, or view any process outside of the chroot. If the sysctl
55892+ option is enabled, a sysctl option with name "chroot_findtask" is
55893+ created.
55894+
55895+config GRKERNSEC_CHROOT_NICE
55896+ bool "Restrict priority changes"
55897+ depends on GRKERNSEC_CHROOT
55898+ help
55899+ If you say Y here, processes inside a chroot will not be able to raise
55900+ the priority of processes in the chroot, or alter the priority of
55901+ processes outside the chroot. This provides more security than simply
55902+ removing CAP_SYS_NICE from the process' capability set. If the
55903+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55904+ is created.
55905+
55906+config GRKERNSEC_CHROOT_SYSCTL
55907+ bool "Deny sysctl writes"
55908+ depends on GRKERNSEC_CHROOT
55909+ help
55910+ If you say Y here, an attacker in a chroot will not be able to
55911+ write to sysctl entries, either by sysctl(2) or through a /proc
55912+ interface. It is strongly recommended that you say Y here. If the
55913+ sysctl option is enabled, a sysctl option with name
55914+ "chroot_deny_sysctl" is created.
55915+
55916+config GRKERNSEC_CHROOT_CAPS
55917+ bool "Capability restrictions"
55918+ depends on GRKERNSEC_CHROOT
55919+ help
55920+ If you say Y here, the capabilities on all root processes within a
55921+ chroot jail will be lowered to stop module insertion, raw i/o,
55922+ system and net admin tasks, rebooting the system, modifying immutable
55923+ files, modifying IPC owned by another, and changing the system time.
55924+ This is left as an option because it can break some apps. Disable this
55925+ if your chrooted apps are having problems performing those kinds of
55926+ tasks. If the sysctl option is enabled, a sysctl option with
55927+ name "chroot_caps" is created.
55928+
55929+endmenu
55930+menu "Kernel Auditing"
55931+depends on GRKERNSEC
55932+
55933+config GRKERNSEC_AUDIT_GROUP
55934+ bool "Single group for auditing"
55935+ help
55936+ If you say Y here, the exec, chdir, and (un)mount logging features
55937+ will only operate on a group you specify. This option is recommended
55938+ if you only want to watch certain users instead of having a large
55939+ amount of logs from the entire system. If the sysctl option is enabled,
55940+ a sysctl option with name "audit_group" is created.
55941+
55942+config GRKERNSEC_AUDIT_GID
55943+ int "GID for auditing"
55944+ depends on GRKERNSEC_AUDIT_GROUP
55945+ default 1007
55946+
55947+config GRKERNSEC_EXECLOG
55948+ bool "Exec logging"
55949+ help
55950+ If you say Y here, all execve() calls will be logged (since the
55951+ other exec*() calls are frontends to execve(), all execution
55952+ will be logged). Useful for shell-servers that like to keep track
55953+ of their users. If the sysctl option is enabled, a sysctl option with
55954+ name "exec_logging" is created.
55955+ WARNING: This option when enabled will produce a LOT of logs, especially
55956+ on an active system.
55957+
55958+config GRKERNSEC_RESLOG
55959+ bool "Resource logging"
55960+ help
55961+ If you say Y here, all attempts to overstep resource limits will
55962+ be logged with the resource name, the requested size, and the current
55963+ limit. It is highly recommended that you say Y here. If the sysctl
55964+ option is enabled, a sysctl option with name "resource_logging" is
55965+ created. If the RBAC system is enabled, the sysctl value is ignored.
55966+
55967+config GRKERNSEC_CHROOT_EXECLOG
55968+ bool "Log execs within chroot"
55969+ help
55970+ If you say Y here, all executions inside a chroot jail will be logged
55971+ to syslog. This can cause a large amount of logs if certain
55972+ applications (e.g. djb's daemontools) are installed on the system, and
55973+ is therefore left as an option. If the sysctl option is enabled, a
55974+ sysctl option with name "chroot_execlog" is created.
55975+
55976+config GRKERNSEC_AUDIT_PTRACE
55977+ bool "Ptrace logging"
55978+ help
55979+ If you say Y here, all attempts to attach to a process via ptrace
55980+ will be logged. If the sysctl option is enabled, a sysctl option
55981+ with name "audit_ptrace" is created.
55982+
55983+config GRKERNSEC_AUDIT_CHDIR
55984+ bool "Chdir logging"
55985+ help
55986+ If you say Y here, all chdir() calls will be logged. If the sysctl
55987+ option is enabled, a sysctl option with name "audit_chdir" is created.
55988+
55989+config GRKERNSEC_AUDIT_MOUNT
55990+ bool "(Un)Mount logging"
55991+ help
55992+ If you say Y here, all mounts and unmounts will be logged. If the
55993+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55994+ created.
55995+
55996+config GRKERNSEC_SIGNAL
55997+ bool "Signal logging"
55998+ help
55999+ If you say Y here, certain important signals will be logged, such as
56000+ SIGSEGV, which will as a result inform you when an error in a program
56001+ occurred, which in some cases could indicate a possible exploit attempt.
56002+ If the sysctl option is enabled, a sysctl option with name
56003+ "signal_logging" is created.
56004+
56005+config GRKERNSEC_FORKFAIL
56006+ bool "Fork failure logging"
56007+ help
56008+ If you say Y here, all failed fork() attempts will be logged.
56009+ This could suggest a fork bomb, or someone attempting to overstep
56010+ their process limit. If the sysctl option is enabled, a sysctl option
56011+ with name "forkfail_logging" is created.
56012+
56013+config GRKERNSEC_TIME
56014+ bool "Time change logging"
56015+ help
56016+ If you say Y here, any changes of the system clock will be logged.
56017+ If the sysctl option is enabled, a sysctl option with name
56018+ "timechange_logging" is created.
56019+
56020+config GRKERNSEC_PROC_IPADDR
56021+ bool "/proc/<pid>/ipaddr support"
56022+ help
56023+ If you say Y here, a new entry will be added to each /proc/<pid>
56024+ directory that contains the IP address of the person using the task.
56025+ The IP is carried across local TCP and AF_UNIX stream sockets.
56026+ This information can be useful for IDS/IPSes to perform remote response
56027+ to a local attack. The entry is readable by only the owner of the
56028+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56029+ the RBAC system), and thus does not create privacy concerns.
56030+
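A reader for this entry might look like the following sketch (error
handling kept minimal; as noted above, only the owner of the process,
or root with CAP_DAC_OVERRIDE, gets data back):

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], addr[64];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/%s/ipaddr",
			 argc > 1 ? argv[1] : "self");
		f = fopen(path, "r");
		if (f && fgets(addr, sizeof(addr), f))
			fputs(addr, stdout);
		if (f)
			fclose(f);
		return 0;
	}
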
56031+config GRKERNSEC_RWXMAP_LOG
56032+ bool 'Denied RWX mmap/mprotect logging'
56033+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56034+ help
56035+ If you say Y here, calls to mmap() and mprotect() with explicit
56036+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56037+ denied by the PAX_MPROTECT feature. If the sysctl option is
56038+ enabled, a sysctl option with name "rwxmap_logging" is created.
56039+
56040+config GRKERNSEC_AUDIT_TEXTREL
56041+ bool 'ELF text relocations logging (READ HELP)'
56042+ depends on PAX_MPROTECT
56043+ help
56044+ If you say Y here, text relocations will be logged with the filename
56045+ of the offending library or binary. The purpose of the feature is
56046+ to help Linux distribution developers get rid of libraries and
56047+ binaries that need text relocations which hinder the future progress
56048+ of PaX. Only Linux distribution developers should say Y here, and
56049+ never on a production machine, as this option creates an information
56050+ leak that could aid an attacker in defeating the randomization of
56051+ a single memory region. If the sysctl option is enabled, a sysctl
56052+ option with name "audit_textrel" is created.
56053+
56054+endmenu
56055+
56056+menu "Executable Protections"
56057+depends on GRKERNSEC
56058+
56059+config GRKERNSEC_DMESG
56060+ bool "Dmesg(8) restriction"
56061+ help
56062+ If you say Y here, non-root users will not be able to use dmesg(8)
56063+ to view up to the last 4kb of messages in the kernel's log buffer.
56064+ The kernel's log buffer often contains kernel addresses and other
56065+ identifying information useful to an attacker in fingerprinting a
56066+ system for a targeted exploit.
56067+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56068+ created.
56069+
56070+config GRKERNSEC_HARDEN_PTRACE
56071+ bool "Deter ptrace-based process snooping"
56072+ help
56073+ If you say Y here, TTY sniffers and other malicious monitoring
56074+ programs implemented through ptrace will be defeated. If you
56075+ have been using the RBAC system, this option has already been
56076+ enabled for several years for all users, with the ability to make
56077+ fine-grained exceptions.
56078+
56079+ This option only affects the ability of non-root users to ptrace
56080+ processes that are not a descendant of the ptracing process.
56081+ This means that strace ./binary and gdb ./binary will still work,
56082+ but attaching to arbitrary processes will not. If the sysctl
56083+ option is enabled, a sysctl option with name "harden_ptrace" is
56084+ created.
56085+
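The distinction drawn above can be exercised with a small sketch like
this one (the pid passed on the command line is hypothetical): as an
ordinary user, attaching to a process that is not a descendant should
fail with EPERM once harden_ptrace is active, while strace/gdb of a
program you launch yourself keeps working:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <sys/ptrace.h>

	int main(int argc, char **argv)
	{
		pid_t pid;

		if (argc < 2)
			return 1;
		pid = (pid_t)strtol(argv[1], NULL, 10);
		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
			perror("PTRACE_ATTACH");	/* EPERM for non-descendants */
			return 1;
		}
		waitpid(pid, NULL, 0);			/* wait for the stop */
		ptrace(PTRACE_DETACH, pid, NULL, NULL);
		return 0;
	}
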
56086+config GRKERNSEC_TPE
56087+ bool "Trusted Path Execution (TPE)"
56088+ help
56089+ If you say Y here, you will be able to choose a gid to add to the
56090+ supplementary groups of users you want to mark as "untrusted."
56091+ These users will not be able to execute any files that are not in
56092+ root-owned directories writable only by root. If the sysctl option
56093+ is enabled, a sysctl option with name "tpe" is created.
56094+
56095+config GRKERNSEC_TPE_ALL
56096+ bool "Partially restrict all non-root users"
56097+ depends on GRKERNSEC_TPE
56098+ help
56099+ If you say Y here, all non-root users will be covered under
56100+ a weaker TPE restriction. This is separate from, and in addition to,
56101+ the main TPE options that you have selected elsewhere. Thus, if a
56102+ "trusted" GID is chosen, this restriction applies to even that GID.
56103+ Under this restriction, all non-root users will only be allowed to
56104+ execute files in directories they own that are not group or
56105+ world-writable, or in directories owned by root and writable only by
56106+ root. If the sysctl option is enabled, a sysctl option with name
56107+ "tpe_restrict_all" is created.
56108+
56109+config GRKERNSEC_TPE_INVERT
56110+ bool "Invert GID option"
56111+ depends on GRKERNSEC_TPE
56112+ help
56113+ If you say Y here, the group you specify in the TPE configuration will
56114+ decide what group TPE restrictions will be *disabled* for. This
56115+ option is useful if you want TPE restrictions to be applied to most
56116+ users on the system. If the sysctl option is enabled, a sysctl option
56117+ with name "tpe_invert" is created. Unlike other sysctl options, this
56118+ entry will default to on for backward-compatibility.
56119+
56120+config GRKERNSEC_TPE_GID
56121+ int "GID for untrusted users"
56122+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56123+ default 1005
56124+ help
56125+ Setting this GID determines what group TPE restrictions will be
56126+ *enabled* for. If the sysctl option is enabled, a sysctl option
56127+ with name "tpe_gid" is created.
56128+
56129+config GRKERNSEC_TPE_GID
56130+ int "GID for trusted users"
56131+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56132+ default 1005
56133+ help
56134+ Setting this GID determines what group TPE restrictions will be
56135+ *disabled* for. If the sysctl option is enabled, a sysctl option
56136+ with name "tpe_gid" is created.
56137+
56138+endmenu
56139+menu "Network Protections"
56140+depends on GRKERNSEC
56141+
56142+config GRKERNSEC_RANDNET
56143+ bool "Larger entropy pools"
56144+ help
56145+ If you say Y here, the entropy pools used for many features of Linux
56146+ and grsecurity will be doubled in size. Since several grsecurity
56147+ features use additional randomness, it is recommended that you say Y
56148+ here. Saying Y here has a similar effect as modifying
56149+ /proc/sys/kernel/random/poolsize.
56150+
56151+config GRKERNSEC_BLACKHOLE
56152+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56153+ depends on NET
56154+ help
56155+ If you say Y here, neither TCP resets nor ICMP
56156+ destination-unreachable packets will be sent in response to packets
56157+ sent to ports for which no associated listening process exists.
56158+ This feature supports both IPv4 and IPv6 and exempts the
56159+ loopback interface from blackholing. Enabling this feature
56160+ makes a host more resilient to DoS attacks and reduces network
56161+ visibility against scanners.
56162+
56163+ The blackhole feature as-implemented is equivalent to the FreeBSD
56164+ blackhole feature, as it prevents RST responses to all packets, not
56165+ just SYNs. Under most application behavior this causes no
56166+ problems, but applications (like haproxy) may not close certain
56167+ connections in a way that cleanly terminates them on the remote
56168+ end, leaving the remote host in LAST_ACK state. Because of this
56169+ side-effect and to prevent intentional LAST_ACK DoSes, this
56170+ feature also adds automatic mitigation against such attacks.
56171+ The mitigation drastically reduces the amount of time a socket
56172+ can spend in LAST_ACK state. If you're using haproxy and not
56173+ all servers it connects to have this option enabled, consider
56174+ disabling this feature on the haproxy host.
56175+
56176+ If the sysctl option is enabled, two sysctl options with names
56177+ "ip_blackhole" and "lastack_retries" will be created.
56178+ While "ip_blackhole" takes the standard zero/non-zero on/off
56179+ toggle, "lastack_retries" uses the same kinds of values as
56180+ "tcp_retries1" and "tcp_retries2". The default value of 4
56181+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56182+ state.
56183+
56184+config GRKERNSEC_SOCKET
56185+ bool "Socket restrictions"
56186+ depends on NET
56187+ help
56188+ If you say Y here, you will be able to choose from several options.
56189+ If you assign a GID on your system and add it to the supplementary
56190+ groups of users you want to restrict socket access to, this patch
56191+ will perform up to three things, based on the option(s) you choose.
56192+
56193+config GRKERNSEC_SOCKET_ALL
56194+ bool "Deny any sockets to group"
56195+ depends on GRKERNSEC_SOCKET
56196+ help
56197+ If you say Y here, you will be able to choose a GID whose users will
56198+ be unable to connect to other hosts from your machine or run server
56199+ applications from your machine. If the sysctl option is enabled, a
56200+ sysctl option with name "socket_all" is created.
56201+
56202+config GRKERNSEC_SOCKET_ALL_GID
56203+ int "GID to deny all sockets for"
56204+ depends on GRKERNSEC_SOCKET_ALL
56205+ default 1004
56206+ help
56207+ Here you can choose the GID to disable socket access for. Remember to
56208+ add the users you want socket access disabled for to the GID
56209+ specified here. If the sysctl option is enabled, a sysctl option
56210+ with name "socket_all_gid" is created.
56211+
56212+config GRKERNSEC_SOCKET_CLIENT
56213+ bool "Deny client sockets to group"
56214+ depends on GRKERNSEC_SOCKET
56215+ help
56216+ If you say Y here, you will be able to choose a GID whose users will
56217+ be unable to connect to other hosts from your machine, but will be
56218+ able to run servers. If this option is enabled, all users in the group
56219+ you specify will have to use passive mode when initiating ftp transfers
56220+ from the shell on your machine. If the sysctl option is enabled, a
56221+ sysctl option with name "socket_client" is created.
56222+
56223+config GRKERNSEC_SOCKET_CLIENT_GID
56224+ int "GID to deny client sockets for"
56225+ depends on GRKERNSEC_SOCKET_CLIENT
56226+ default 1003
56227+ help
56228+ Here you can choose the GID to disable client socket access for.
56229+ Remember to add the users you want client socket access disabled for to
56230+ the GID specified here. If the sysctl option is enabled, a sysctl
56231+ option with name "socket_client_gid" is created.
56232+
56233+config GRKERNSEC_SOCKET_SERVER
56234+ bool "Deny server sockets to group"
56235+ depends on GRKERNSEC_SOCKET
56236+ help
56237+ If you say Y here, you will be able to choose a GID whose users will
56238+ be unable to run server applications from your machine. If the sysctl
56239+ option is enabled, a sysctl option with name "socket_server" is created.
56240+
56241+config GRKERNSEC_SOCKET_SERVER_GID
56242+ int "GID to deny server sockets for"
56243+ depends on GRKERNSEC_SOCKET_SERVER
56244+ default 1002
56245+ help
56246+ Here you can choose the GID to disable server socket access for.
56247+ Remember to add the users you want server socket access disabled for to
56248+ the GID specified here. If the sysctl option is enabled, a sysctl
56249+ option with name "socket_server_gid" is created.
56250+
56251+endmenu
56252+menu "Sysctl support"
56253+depends on GRKERNSEC && SYSCTL
56254+
56255+config GRKERNSEC_SYSCTL
56256+ bool "Sysctl support"
56257+ help
56258+ If you say Y here, you will be able to change the options that
56259+ grsecurity runs with at bootup, without having to recompile your
56260+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56261+ to enable (1) or disable (0) various features. All the sysctl entries
56262+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56263+ All features enabled in the kernel configuration are disabled at boot
56264+ if you do not say Y to the "Turn on features by default" option.
56265+ All options should be set at startup, and the grsec_lock entry should
56266+ be set to a non-zero value after all the options are set.
56267+ *THIS IS EXTREMELY IMPORTANT*
56268+
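The startup sequence described above amounts to writing the desired
values and then setting grsec_lock last; a minimal sketch (the entry
shown is just one example, taken from the chroot options above, and
which entries exist depends on the features compiled in):

	#include <stdio.h>

	static int write_sysctl(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* enable the desired features first... */
		write_sysctl("/proc/sys/kernel/grsecurity/chroot_deny_mount", "1");
		/* ...then lock them down; nothing is mutable afterwards */
		return write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1");
	}
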
56269+config GRKERNSEC_SYSCTL_DISTRO
56270+ bool "Extra sysctl support for distro makers (READ HELP)"
56271+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56272+ help
56273+ If you say Y here, additional sysctl options will be created
56274+ for features that affect processes running as root. Therefore,
56275+ it is critical when using this option that the grsec_lock entry be
56276+ enabled after boot. Only distros that ship prebuilt kernel packages
56277+ with this option enabled, and that can ensure grsec_lock is enabled
56278+ after boot, should use this option.
56279+ *Failure to set grsec_lock after boot makes all grsec features
56280+ this option covers useless*
56281+
56282+ Currently this option creates the following sysctl entries:
56283+ "Disable Privileged I/O": "disable_priv_io"
56284+
56285+config GRKERNSEC_SYSCTL_ON
56286+ bool "Turn on features by default"
56287+ depends on GRKERNSEC_SYSCTL
56288+ help
56289+ If you say Y here, instead of having all features enabled in the
56290+ kernel configuration disabled at boot time, the features will be
56291+ enabled at boot time. It is recommended you say Y here unless
56292+ there is some reason you would want all sysctl-tunable features to
56293+ be disabled by default. As mentioned elsewhere, it is important
56294+ to enable the grsec_lock entry once you have finished modifying
56295+ the sysctl entries.
56296+
56297+endmenu
56298+menu "Logging Options"
56299+depends on GRKERNSEC
56300+
56301+config GRKERNSEC_FLOODTIME
56302+ int "Seconds in between log messages (minimum)"
56303+ default 10
56304+ help
56305+ This option allows you to enforce the minimum number of seconds between
56306+ grsecurity log messages. The default should be suitable for most
56307+ people, however, if you choose to change it, choose a value small enough
56308+ to allow informative logs to be produced, but large enough to
56309+ prevent flooding.
56310+
56311+config GRKERNSEC_FLOODBURST
56312+ int "Number of messages in a burst (maximum)"
56313+ default 4
56314+ help
56315+ This option allows you to choose the maximum number of messages allowed
56316+ within the flood time interval you chose in a separate option. The
56317+ default should be suitable for most people, however if you find that
56318+ many of your logs are being interpreted as flooding, you may want to
56319+ raise this value.
56320+
56321+endmenu
56322+
56323+endmenu
56324diff -urNp linux-2.6.32.46/grsecurity/Makefile linux-2.6.32.46/grsecurity/Makefile
56325--- linux-2.6.32.46/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56326+++ linux-2.6.32.46/grsecurity/Makefile 2011-08-21 18:54:34.000000000 -0400
56327@@ -0,0 +1,34 @@
56328+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56329+# during 2001-2009 it has been completely redesigned by Brad Spengler
56330+# into an RBAC system
56331+#
56332+# All code in this directory and various hooks inserted throughout the kernel
56333+# are copyright Brad Spengler - Open Source Security, Inc., and released
56334+# under the GPL v2 or higher
56335+
56336+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56337+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56338+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56339+
56340+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56341+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56342+ gracl_learn.o grsec_log.o
56343+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56344+
56345+ifdef CONFIG_NET
56346+obj-y += grsec_sock.o
56347+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56348+endif
56349+
56350+ifndef CONFIG_GRKERNSEC
56351+obj-y += grsec_disabled.o
56352+endif
56353+
56354+ifdef CONFIG_GRKERNSEC_HIDESYM
56355+extra-y := grsec_hidesym.o
56356+$(obj)/grsec_hidesym.o:
56357+ @-chmod -f 500 /boot
56358+ @-chmod -f 500 /lib/modules
56359+ @-chmod -f 700 .
56360+ @echo ' grsec: protected kernel image paths'
56361+endif
56362diff -urNp linux-2.6.32.46/include/acpi/acpi_bus.h linux-2.6.32.46/include/acpi/acpi_bus.h
56363--- linux-2.6.32.46/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56364+++ linux-2.6.32.46/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56365@@ -107,7 +107,7 @@ struct acpi_device_ops {
56366 acpi_op_bind bind;
56367 acpi_op_unbind unbind;
56368 acpi_op_notify notify;
56369-};
56370+} __no_const;
56371
56372 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56373
56374diff -urNp linux-2.6.32.46/include/acpi/acpi_drivers.h linux-2.6.32.46/include/acpi/acpi_drivers.h
56375--- linux-2.6.32.46/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56376+++ linux-2.6.32.46/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56377@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56378 Dock Station
56379 -------------------------------------------------------------------------- */
56380 struct acpi_dock_ops {
56381- acpi_notify_handler handler;
56382- acpi_notify_handler uevent;
56383+ const acpi_notify_handler handler;
56384+ const acpi_notify_handler uevent;
56385 };
56386
56387 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56388@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56389 extern int register_dock_notifier(struct notifier_block *nb);
56390 extern void unregister_dock_notifier(struct notifier_block *nb);
56391 extern int register_hotplug_dock_device(acpi_handle handle,
56392- struct acpi_dock_ops *ops,
56393+ const struct acpi_dock_ops *ops,
56394 void *context);
56395 extern void unregister_hotplug_dock_device(acpi_handle handle);
56396 #else
56397@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56398 {
56399 }
56400 static inline int register_hotplug_dock_device(acpi_handle handle,
56401- struct acpi_dock_ops *ops,
56402+ const struct acpi_dock_ops *ops,
56403 void *context)
56404 {
56405 return -ENODEV;
56406diff -urNp linux-2.6.32.46/include/asm-generic/atomic-long.h linux-2.6.32.46/include/asm-generic/atomic-long.h
56407--- linux-2.6.32.46/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56408+++ linux-2.6.32.46/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56409@@ -22,6 +22,12 @@
56410
56411 typedef atomic64_t atomic_long_t;
56412
56413+#ifdef CONFIG_PAX_REFCOUNT
56414+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56415+#else
56416+typedef atomic64_t atomic_long_unchecked_t;
56417+#endif
56418+
56419 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56420
56421 static inline long atomic_long_read(atomic_long_t *l)
56422@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56423 return (long)atomic64_read(v);
56424 }
56425
56426+#ifdef CONFIG_PAX_REFCOUNT
56427+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56428+{
56429+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56430+
56431+ return (long)atomic64_read_unchecked(v);
56432+}
56433+#endif
56434+
56435 static inline void atomic_long_set(atomic_long_t *l, long i)
56436 {
56437 atomic64_t *v = (atomic64_t *)l;
56438@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56439 atomic64_set(v, i);
56440 }
56441
56442+#ifdef CONFIG_PAX_REFCOUNT
56443+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56444+{
56445+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56446+
56447+ atomic64_set_unchecked(v, i);
56448+}
56449+#endif
56450+
56451 static inline void atomic_long_inc(atomic_long_t *l)
56452 {
56453 atomic64_t *v = (atomic64_t *)l;
56454@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56455 atomic64_inc(v);
56456 }
56457
56458+#ifdef CONFIG_PAX_REFCOUNT
56459+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56460+{
56461+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56462+
56463+ atomic64_inc_unchecked(v);
56464+}
56465+#endif
56466+
56467 static inline void atomic_long_dec(atomic_long_t *l)
56468 {
56469 atomic64_t *v = (atomic64_t *)l;
56470@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56471 atomic64_dec(v);
56472 }
56473
56474+#ifdef CONFIG_PAX_REFCOUNT
56475+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56476+{
56477+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56478+
56479+ atomic64_dec_unchecked(v);
56480+}
56481+#endif
56482+
56483 static inline void atomic_long_add(long i, atomic_long_t *l)
56484 {
56485 atomic64_t *v = (atomic64_t *)l;
56486@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56487 atomic64_add(i, v);
56488 }
56489
56490+#ifdef CONFIG_PAX_REFCOUNT
56491+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56492+{
56493+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56494+
56495+ atomic64_add_unchecked(i, v);
56496+}
56497+#endif
56498+
56499 static inline void atomic_long_sub(long i, atomic_long_t *l)
56500 {
56501 atomic64_t *v = (atomic64_t *)l;
56502@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56503 return (long)atomic64_inc_return(v);
56504 }
56505
56506+#ifdef CONFIG_PAX_REFCOUNT
56507+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56508+{
56509+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56510+
56511+ return (long)atomic64_inc_return_unchecked(v);
56512+}
56513+#endif
56514+
56515 static inline long atomic_long_dec_return(atomic_long_t *l)
56516 {
56517 atomic64_t *v = (atomic64_t *)l;
56518@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56519
56520 typedef atomic_t atomic_long_t;
56521
56522+#ifdef CONFIG_PAX_REFCOUNT
56523+typedef atomic_unchecked_t atomic_long_unchecked_t;
56524+#else
56525+typedef atomic_t atomic_long_unchecked_t;
56526+#endif
56527+
56528 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56529 static inline long atomic_long_read(atomic_long_t *l)
56530 {
56531@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56532 return (long)atomic_read(v);
56533 }
56534
56535+#ifdef CONFIG_PAX_REFCOUNT
56536+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56537+{
56538+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56539+
56540+ return (long)atomic_read_unchecked(v);
56541+}
56542+#endif
56543+
56544 static inline void atomic_long_set(atomic_long_t *l, long i)
56545 {
56546 atomic_t *v = (atomic_t *)l;
56547@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56548 atomic_set(v, i);
56549 }
56550
56551+#ifdef CONFIG_PAX_REFCOUNT
56552+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56553+{
56554+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56555+
56556+ atomic_set_unchecked(v, i);
56557+}
56558+#endif
56559+
56560 static inline void atomic_long_inc(atomic_long_t *l)
56561 {
56562 atomic_t *v = (atomic_t *)l;
56563@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
56564 atomic_inc(v);
56565 }
56566
56567+#ifdef CONFIG_PAX_REFCOUNT
56568+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56569+{
56570+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56571+
56572+ atomic_inc_unchecked(v);
56573+}
56574+#endif
56575+
56576 static inline void atomic_long_dec(atomic_long_t *l)
56577 {
56578 atomic_t *v = (atomic_t *)l;
56579@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
56580 atomic_dec(v);
56581 }
56582
56583+#ifdef CONFIG_PAX_REFCOUNT
56584+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56585+{
56586+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56587+
56588+ atomic_dec_unchecked(v);
56589+}
56590+#endif
56591+
56592 static inline void atomic_long_add(long i, atomic_long_t *l)
56593 {
56594 atomic_t *v = (atomic_t *)l;
56595@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
56596 atomic_add(i, v);
56597 }
56598
56599+#ifdef CONFIG_PAX_REFCOUNT
56600+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56601+{
56602+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56603+
56604+ atomic_add_unchecked(i, v);
56605+}
56606+#endif
56607+
56608 static inline void atomic_long_sub(long i, atomic_long_t *l)
56609 {
56610 atomic_t *v = (atomic_t *)l;
56611@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
56612 return (long)atomic_inc_return(v);
56613 }
56614
56615+#ifdef CONFIG_PAX_REFCOUNT
56616+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56617+{
56618+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56619+
56620+ return (long)atomic_inc_return_unchecked(v);
56621+}
56622+#endif
56623+
56624 static inline long atomic_long_dec_return(atomic_long_t *l)
56625 {
56626 atomic_t *v = (atomic_t *)l;
56627@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
56628
56629 #endif /* BITS_PER_LONG == 64 */
56630
56631+#ifdef CONFIG_PAX_REFCOUNT
56632+static inline void pax_refcount_needs_these_functions(void)
56633+{
56634+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
56635+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
56636+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
56637+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
56638+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
56639+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
56640+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
56641+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
56642+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
56643+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
56644+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
56645+
56646+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
56647+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
56648+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
56649+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
56650+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
56651+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
56652+}
56653+#else
56654+#define atomic_read_unchecked(v) atomic_read(v)
56655+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
56656+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
56657+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
56658+#define atomic_inc_unchecked(v) atomic_inc(v)
56659+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
56660+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
56661+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
56662+#define atomic_dec_unchecked(v) atomic_dec(v)
56663+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
56664+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
56665+
56666+#define atomic_long_read_unchecked(v) atomic_long_read(v)
56667+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
56668+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
56669+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
56670+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
56671+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
56672+#endif
56673+
56674 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
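As a sketch of how the _unchecked split above is meant to be used (the
names below are invented for illustration): counters whose wraparound is
harmless are declared with the unchecked type, so that PAX_REFCOUNT's
overflow detection only guards real reference counts:

	static atomic_long_unchecked_t rx_packets;	/* wraparound is harmless */
	static atomic_long_t object_refcnt;		/* overflow would be a bug */

	static void rx_one_packet(void)
	{
		atomic_long_inc_unchecked(&rx_packets);	/* never trapped */
		atomic_long_inc(&object_refcnt);	/* trapped on overflow under PAX_REFCOUNT */
	}
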
56675diff -urNp linux-2.6.32.46/include/asm-generic/bug.h linux-2.6.32.46/include/asm-generic/bug.h
56676--- linux-2.6.32.46/include/asm-generic/bug.h 2011-07-13 17:23:04.000000000 -0400
56677+++ linux-2.6.32.46/include/asm-generic/bug.h 2011-08-21 17:56:07.000000000 -0400
56678@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
56679
56680 #else /* !CONFIG_BUG */
56681 #ifndef HAVE_ARCH_BUG
56682-#define BUG() do {} while(0)
56683+#define BUG() do { for (;;) ; } while(0)
56684 #endif
56685
56686 #ifndef HAVE_ARCH_BUG_ON
56687-#define BUG_ON(condition) do { if (condition) ; } while(0)
56688+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
56689 #endif
56690
56691 #ifndef HAVE_ARCH_WARN_ON
56692diff -urNp linux-2.6.32.46/include/asm-generic/cache.h linux-2.6.32.46/include/asm-generic/cache.h
56693--- linux-2.6.32.46/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
56694+++ linux-2.6.32.46/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
56695@@ -6,7 +6,7 @@
56696 * cache lines need to provide their own cache.h.
56697 */
56698
56699-#define L1_CACHE_SHIFT 5
56700-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
56701+#define L1_CACHE_SHIFT 5UL
56702+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
56703
56704 #endif /* __ASM_GENERIC_CACHE_H */
56705diff -urNp linux-2.6.32.46/include/asm-generic/dma-mapping-common.h linux-2.6.32.46/include/asm-generic/dma-mapping-common.h
56706--- linux-2.6.32.46/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
56707+++ linux-2.6.32.46/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
56708@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
56709 enum dma_data_direction dir,
56710 struct dma_attrs *attrs)
56711 {
56712- struct dma_map_ops *ops = get_dma_ops(dev);
56713+ const struct dma_map_ops *ops = get_dma_ops(dev);
56714 dma_addr_t addr;
56715
56716 kmemcheck_mark_initialized(ptr, size);
56717@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
56718 enum dma_data_direction dir,
56719 struct dma_attrs *attrs)
56720 {
56721- struct dma_map_ops *ops = get_dma_ops(dev);
56722+ const struct dma_map_ops *ops = get_dma_ops(dev);
56723
56724 BUG_ON(!valid_dma_direction(dir));
56725 if (ops->unmap_page)
56726@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
56727 int nents, enum dma_data_direction dir,
56728 struct dma_attrs *attrs)
56729 {
56730- struct dma_map_ops *ops = get_dma_ops(dev);
56731+ const struct dma_map_ops *ops = get_dma_ops(dev);
56732 int i, ents;
56733 struct scatterlist *s;
56734
56735@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
56736 int nents, enum dma_data_direction dir,
56737 struct dma_attrs *attrs)
56738 {
56739- struct dma_map_ops *ops = get_dma_ops(dev);
56740+ const struct dma_map_ops *ops = get_dma_ops(dev);
56741
56742 BUG_ON(!valid_dma_direction(dir));
56743 debug_dma_unmap_sg(dev, sg, nents, dir);
56744@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
56745 size_t offset, size_t size,
56746 enum dma_data_direction dir)
56747 {
56748- struct dma_map_ops *ops = get_dma_ops(dev);
56749+ const struct dma_map_ops *ops = get_dma_ops(dev);
56750 dma_addr_t addr;
56751
56752 kmemcheck_mark_initialized(page_address(page) + offset, size);
56753@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
56754 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
56755 size_t size, enum dma_data_direction dir)
56756 {
56757- struct dma_map_ops *ops = get_dma_ops(dev);
56758+ const struct dma_map_ops *ops = get_dma_ops(dev);
56759
56760 BUG_ON(!valid_dma_direction(dir));
56761 if (ops->unmap_page)
56762@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
56763 size_t size,
56764 enum dma_data_direction dir)
56765 {
56766- struct dma_map_ops *ops = get_dma_ops(dev);
56767+ const struct dma_map_ops *ops = get_dma_ops(dev);
56768
56769 BUG_ON(!valid_dma_direction(dir));
56770 if (ops->sync_single_for_cpu)
56771@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
56772 dma_addr_t addr, size_t size,
56773 enum dma_data_direction dir)
56774 {
56775- struct dma_map_ops *ops = get_dma_ops(dev);
56776+ const struct dma_map_ops *ops = get_dma_ops(dev);
56777
56778 BUG_ON(!valid_dma_direction(dir));
56779 if (ops->sync_single_for_device)
56780@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
56781 size_t size,
56782 enum dma_data_direction dir)
56783 {
56784- struct dma_map_ops *ops = get_dma_ops(dev);
56785+ const struct dma_map_ops *ops = get_dma_ops(dev);
56786
56787 BUG_ON(!valid_dma_direction(dir));
56788 if (ops->sync_single_range_for_cpu) {
56789@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
56790 size_t size,
56791 enum dma_data_direction dir)
56792 {
56793- struct dma_map_ops *ops = get_dma_ops(dev);
56794+ const struct dma_map_ops *ops = get_dma_ops(dev);
56795
56796 BUG_ON(!valid_dma_direction(dir));
56797 if (ops->sync_single_range_for_device) {
56798@@ -155,7 +155,7 @@ static inline void
56799 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
56800 int nelems, enum dma_data_direction dir)
56801 {
56802- struct dma_map_ops *ops = get_dma_ops(dev);
56803+ const struct dma_map_ops *ops = get_dma_ops(dev);
56804
56805 BUG_ON(!valid_dma_direction(dir));
56806 if (ops->sync_sg_for_cpu)
56807@@ -167,7 +167,7 @@ static inline void
56808 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
56809 int nelems, enum dma_data_direction dir)
56810 {
56811- struct dma_map_ops *ops = get_dma_ops(dev);
56812+ const struct dma_map_ops *ops = get_dma_ops(dev);
56813
56814 BUG_ON(!valid_dma_direction(dir));
56815 if (ops->sync_sg_for_device)
56816diff -urNp linux-2.6.32.46/include/asm-generic/emergency-restart.h linux-2.6.32.46/include/asm-generic/emergency-restart.h
56817--- linux-2.6.32.46/include/asm-generic/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
56818+++ linux-2.6.32.46/include/asm-generic/emergency-restart.h 2011-08-21 19:17:17.000000000 -0400
56819@@ -1,7 +1,7 @@
56820 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
56821 #define _ASM_GENERIC_EMERGENCY_RESTART_H
56822
56823-static inline void machine_emergency_restart(void)
56824+static inline __noreturn void machine_emergency_restart(void)
56825 {
56826 machine_restart(NULL);
56827 }
56828diff -urNp linux-2.6.32.46/include/asm-generic/futex.h linux-2.6.32.46/include/asm-generic/futex.h
56829--- linux-2.6.32.46/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
56830+++ linux-2.6.32.46/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
56831@@ -6,7 +6,7 @@
56832 #include <asm/errno.h>
56833
56834 static inline int
56835-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
56836+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
56837 {
56838 int op = (encoded_op >> 28) & 7;
56839 int cmp = (encoded_op >> 24) & 15;
56840@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
56841 }
56842
56843 static inline int
56844-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
56845+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
56846 {
56847 return -ENOSYS;
56848 }
56849diff -urNp linux-2.6.32.46/include/asm-generic/int-l64.h linux-2.6.32.46/include/asm-generic/int-l64.h
56850--- linux-2.6.32.46/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
56851+++ linux-2.6.32.46/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
56852@@ -46,6 +46,8 @@ typedef unsigned int u32;
56853 typedef signed long s64;
56854 typedef unsigned long u64;
56855
56856+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
56857+
56858 #define S8_C(x) x
56859 #define U8_C(x) x ## U
56860 #define S16_C(x) x
56861diff -urNp linux-2.6.32.46/include/asm-generic/int-ll64.h linux-2.6.32.46/include/asm-generic/int-ll64.h
56862--- linux-2.6.32.46/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
56863+++ linux-2.6.32.46/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
56864@@ -51,6 +51,8 @@ typedef unsigned int u32;
56865 typedef signed long long s64;
56866 typedef unsigned long long u64;
56867
56868+typedef unsigned long long intoverflow_t;
56869+
56870 #define S8_C(x) x
56871 #define U8_C(x) x ## U
56872 #define S16_C(x) x
56873diff -urNp linux-2.6.32.46/include/asm-generic/kmap_types.h linux-2.6.32.46/include/asm-generic/kmap_types.h
56874--- linux-2.6.32.46/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
56875+++ linux-2.6.32.46/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
56876@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
56877 KMAP_D(16) KM_IRQ_PTE,
56878 KMAP_D(17) KM_NMI,
56879 KMAP_D(18) KM_NMI_PTE,
56880-KMAP_D(19) KM_TYPE_NR
56881+KMAP_D(19) KM_CLEARPAGE,
56882+KMAP_D(20) KM_TYPE_NR
56883 };
56884
56885 #undef KMAP_D
56886diff -urNp linux-2.6.32.46/include/asm-generic/pgtable.h linux-2.6.32.46/include/asm-generic/pgtable.h
56887--- linux-2.6.32.46/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
56888+++ linux-2.6.32.46/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
56889@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
56890 unsigned long size);
56891 #endif
56892
56893+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
56894+static inline unsigned long pax_open_kernel(void) { return 0; }
56895+#endif
56896+
56897+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
56898+static inline unsigned long pax_close_kernel(void) { return 0; }
56899+#endif
56900+
56901 #endif /* !__ASSEMBLY__ */
56902
56903 #endif /* _ASM_GENERIC_PGTABLE_H */
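The pgtable.h hunk adds no-op fallbacks for architectures that do not provide pax_open_kernel()/pax_close_kernel(). Elsewhere in the patch these calls bracket writes to kernel data that is otherwise kept read-only; the sketch below only illustrates that call pattern with made-up structure names, it is not the arch implementation:

    static unsigned long pax_open_kernel(void)  { return 0; }   /* generic no-op fallback */
    static unsigned long pax_close_kernel(void) { return 0; }   /* generic no-op fallback */

    struct ops { int (*handler)(void); };
    static struct ops my_ops;            /* imagine this lives in a read-only section */

    static int default_handler(void) { return 0; }

    static void install_handler(int (*fn)(void))
    {
        pax_open_kernel();               /* arch hook: temporarily allow the write */
        my_ops.handler = fn;
        pax_close_kernel();              /* restore the read-only mapping */
    }

    int main(void)
    {
        install_handler(default_handler);
        return my_ops.handler();
    }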
56904diff -urNp linux-2.6.32.46/include/asm-generic/pgtable-nopmd.h linux-2.6.32.46/include/asm-generic/pgtable-nopmd.h
56905--- linux-2.6.32.46/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
56906+++ linux-2.6.32.46/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
56907@@ -1,14 +1,19 @@
56908 #ifndef _PGTABLE_NOPMD_H
56909 #define _PGTABLE_NOPMD_H
56910
56911-#ifndef __ASSEMBLY__
56912-
56913 #include <asm-generic/pgtable-nopud.h>
56914
56915-struct mm_struct;
56916-
56917 #define __PAGETABLE_PMD_FOLDED
56918
56919+#define PMD_SHIFT PUD_SHIFT
56920+#define PTRS_PER_PMD 1
56921+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
56922+#define PMD_MASK (~(PMD_SIZE-1))
56923+
56924+#ifndef __ASSEMBLY__
56925+
56926+struct mm_struct;
56927+
56928 /*
56929 * Having the pmd type consist of a pud gets the size right, and allows
56930 * us to conceptually access the pud entry that this pmd is folded into
56931@@ -16,11 +21,6 @@ struct mm_struct;
56932 */
56933 typedef struct { pud_t pud; } pmd_t;
56934
56935-#define PMD_SHIFT PUD_SHIFT
56936-#define PTRS_PER_PMD 1
56937-#define PMD_SIZE (1UL << PMD_SHIFT)
56938-#define PMD_MASK (~(PMD_SIZE-1))
56939-
56940 /*
56941 * The "pud_xxx()" functions here are trivial for a folded two-level
56942 * setup: the pmd is never bad, and a pmd always exists (as it's folded
56943diff -urNp linux-2.6.32.46/include/asm-generic/pgtable-nopud.h linux-2.6.32.46/include/asm-generic/pgtable-nopud.h
56944--- linux-2.6.32.46/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
56945+++ linux-2.6.32.46/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
56946@@ -1,10 +1,15 @@
56947 #ifndef _PGTABLE_NOPUD_H
56948 #define _PGTABLE_NOPUD_H
56949
56950-#ifndef __ASSEMBLY__
56951-
56952 #define __PAGETABLE_PUD_FOLDED
56953
56954+#define PUD_SHIFT PGDIR_SHIFT
56955+#define PTRS_PER_PUD 1
56956+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
56957+#define PUD_MASK (~(PUD_SIZE-1))
56958+
56959+#ifndef __ASSEMBLY__
56960+
56961 /*
56962 * Having the pud type consist of a pgd gets the size right, and allows
56963 * us to conceptually access the pgd entry that this pud is folded into
56964@@ -12,11 +17,6 @@
56965 */
56966 typedef struct { pgd_t pgd; } pud_t;
56967
56968-#define PUD_SHIFT PGDIR_SHIFT
56969-#define PTRS_PER_PUD 1
56970-#define PUD_SIZE (1UL << PUD_SHIFT)
56971-#define PUD_MASK (~(PUD_SIZE-1))
56972-
56973 /*
56974 * The "pgd_xxx()" functions here are trivial for a folded two-level
56975 * setup: the pud is never bad, and a pud always exists (as it's folded
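Both pgtable-nop*d.h hunks hoist the folded-level constants out of the #ifndef __ASSEMBLY__ block and switch 1UL to _AC(1,UL), which is what lets the same definitions be used from assembler as well as C. A simplified, stand-alone model of that macro (mirroring the kernel's const.h, shown here only for illustration):

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    # define _AC(X, Y)  X                  /* assembler: no integer suffixes */
    #else
    # define __AC(X, Y) (X##Y)
    # define _AC(X, Y)  __AC(X, Y)         /* C: paste the UL suffix back on */
    #endif

    #define PMD_SHIFT 21
    #define PMD_SIZE  (_AC(1,UL) << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE-1))

    int main(void)
    {
        printf("PMD_SIZE=%lu PMD_MASK=%#lx\n", PMD_SIZE, PMD_MASK);
        return 0;
    }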
56976diff -urNp linux-2.6.32.46/include/asm-generic/vmlinux.lds.h linux-2.6.32.46/include/asm-generic/vmlinux.lds.h
56977--- linux-2.6.32.46/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
56978+++ linux-2.6.32.46/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
56979@@ -199,6 +199,7 @@
56980 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
56981 VMLINUX_SYMBOL(__start_rodata) = .; \
56982 *(.rodata) *(.rodata.*) \
56983+ *(.data.read_only) \
56984 *(__vermagic) /* Kernel version magic */ \
56985 *(__markers_strings) /* Markers: strings */ \
56986 *(__tracepoints_strings)/* Tracepoints: strings */ \
56987@@ -656,22 +657,24 @@
56988 * section in the linker script will go there too. @phdr should have
56989 * a leading colon.
56990 *
56991- * Note that this macros defines __per_cpu_load as an absolute symbol.
56992+ * Note that this macros defines per_cpu_load as an absolute symbol.
56993 * If there is no need to put the percpu section at a predetermined
56994 * address, use PERCPU().
56995 */
56996 #define PERCPU_VADDR(vaddr, phdr) \
56997- VMLINUX_SYMBOL(__per_cpu_load) = .; \
56998- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
56999+ per_cpu_load = .; \
57000+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57001 - LOAD_OFFSET) { \
57002+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57003 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57004 *(.data.percpu.first) \
57005- *(.data.percpu.page_aligned) \
57006 *(.data.percpu) \
57007+ . = ALIGN(PAGE_SIZE); \
57008+ *(.data.percpu.page_aligned) \
57009 *(.data.percpu.shared_aligned) \
57010 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57011 } phdr \
57012- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57013+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57014
57015 /**
57016 * PERCPU - define output section for percpu area, simple version
57017diff -urNp linux-2.6.32.46/include/drm/drm_crtc_helper.h linux-2.6.32.46/include/drm/drm_crtc_helper.h
57018--- linux-2.6.32.46/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57019+++ linux-2.6.32.46/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57020@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57021
57022 /* reload the current crtc LUT */
57023 void (*load_lut)(struct drm_crtc *crtc);
57024-};
57025+} __no_const;
57026
57027 struct drm_encoder_helper_funcs {
57028 void (*dpms)(struct drm_encoder *encoder, int mode);
57029@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57030 struct drm_connector *connector);
57031 /* disable encoder when not in use - more explicit than dpms off */
57032 void (*disable)(struct drm_encoder *encoder);
57033-};
57034+} __no_const;
57035
57036 struct drm_connector_helper_funcs {
57037 int (*get_modes)(struct drm_connector *connector);
57038diff -urNp linux-2.6.32.46/include/drm/drmP.h linux-2.6.32.46/include/drm/drmP.h
57039--- linux-2.6.32.46/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57040+++ linux-2.6.32.46/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57041@@ -71,6 +71,7 @@
57042 #include <linux/workqueue.h>
57043 #include <linux/poll.h>
57044 #include <asm/pgalloc.h>
57045+#include <asm/local.h>
57046 #include "drm.h"
57047
57048 #include <linux/idr.h>
57049@@ -814,7 +815,7 @@ struct drm_driver {
57050 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57051
57052 /* Driver private ops for this object */
57053- struct vm_operations_struct *gem_vm_ops;
57054+ const struct vm_operations_struct *gem_vm_ops;
57055
57056 int major;
57057 int minor;
57058@@ -917,7 +918,7 @@ struct drm_device {
57059
57060 /** \name Usage Counters */
57061 /*@{ */
57062- int open_count; /**< Outstanding files open */
57063+ local_t open_count; /**< Outstanding files open */
57064 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57065 atomic_t vma_count; /**< Outstanding vma areas open */
57066 int buf_use; /**< Buffers in use -- cannot alloc */
57067@@ -928,7 +929,7 @@ struct drm_device {
57068 /*@{ */
57069 unsigned long counters;
57070 enum drm_stat_type types[15];
57071- atomic_t counts[15];
57072+ atomic_unchecked_t counts[15];
57073 /*@} */
57074
57075 struct list_head filelist;
57076@@ -1016,7 +1017,7 @@ struct drm_device {
57077 struct pci_controller *hose;
57078 #endif
57079 struct drm_sg_mem *sg; /**< Scatter gather memory */
57080- unsigned int num_crtcs; /**< Number of CRTCs on this device */
57081+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
57082 void *dev_private; /**< device private data */
57083 void *mm_private;
57084 struct address_space *dev_mapping;
57085@@ -1042,11 +1043,11 @@ struct drm_device {
57086 spinlock_t object_name_lock;
57087 struct idr object_name_idr;
57088 atomic_t object_count;
57089- atomic_t object_memory;
57090+ atomic_unchecked_t object_memory;
57091 atomic_t pin_count;
57092- atomic_t pin_memory;
57093+ atomic_unchecked_t pin_memory;
57094 atomic_t gtt_count;
57095- atomic_t gtt_memory;
57096+ atomic_unchecked_t gtt_memory;
57097 uint32_t gtt_total;
57098 uint32_t invalidate_domains; /* domains pending invalidation */
57099 uint32_t flush_domains; /* domains pending flush */
57100diff -urNp linux-2.6.32.46/include/drm/ttm/ttm_memory.h linux-2.6.32.46/include/drm/ttm/ttm_memory.h
57101--- linux-2.6.32.46/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57102+++ linux-2.6.32.46/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57103@@ -47,7 +47,7 @@
57104
57105 struct ttm_mem_shrink {
57106 int (*do_shrink) (struct ttm_mem_shrink *);
57107-};
57108+} __no_const;
57109
57110 /**
57111 * struct ttm_mem_global - Global memory accounting structure.
57112diff -urNp linux-2.6.32.46/include/linux/a.out.h linux-2.6.32.46/include/linux/a.out.h
57113--- linux-2.6.32.46/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57114+++ linux-2.6.32.46/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57115@@ -39,6 +39,14 @@ enum machine_type {
57116 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57117 };
57118
57119+/* Constants for the N_FLAGS field */
57120+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57121+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57122+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57123+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57124+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57125+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57126+
57127 #if !defined (N_MAGIC)
57128 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57129 #endif
57130diff -urNp linux-2.6.32.46/include/linux/atmdev.h linux-2.6.32.46/include/linux/atmdev.h
57131--- linux-2.6.32.46/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57132+++ linux-2.6.32.46/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57133@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57134 #endif
57135
57136 struct k_atm_aal_stats {
57137-#define __HANDLE_ITEM(i) atomic_t i
57138+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57139 __AAL_STAT_ITEMS
57140 #undef __HANDLE_ITEM
57141 };
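This is the first of many conversions in this section from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT the ordinary atomic operations detect and stop reference-count overflows; the _unchecked variants opt statistics-style counters (here the per-AAL counters) out of that check because wrapping is harmless for them. A toy user-space model of the distinction, not the kernel implementation:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { unsigned int counter; } atomic_toy_t;
    typedef struct { unsigned int counter; } atomic_unchecked_toy_t;

    static void atomic_toy_inc(atomic_toy_t *v)
    {
        if (v->counter == UINT_MAX) {        /* model of the overflow check     */
            fprintf(stderr, "refcount overflow detected\n");
            return;                          /* saturate instead of wrapping    */
        }
        v->counter++;
    }

    static void atomic_unchecked_toy_inc(atomic_unchecked_toy_t *v)
    {
        v->counter++;                        /* plain wrap: fine for statistics */
    }

    int main(void)
    {
        atomic_toy_t ref = { UINT_MAX };
        atomic_unchecked_toy_t stat = { UINT_MAX };

        atomic_toy_inc(&ref);                /* stays at UINT_MAX and reports */
        atomic_unchecked_toy_inc(&stat);     /* wraps around to 0             */

        printf("%u %u\n", ref.counter, stat.counter);
        return 0;
    }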
57142diff -urNp linux-2.6.32.46/include/linux/backlight.h linux-2.6.32.46/include/linux/backlight.h
57143--- linux-2.6.32.46/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57144+++ linux-2.6.32.46/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57145@@ -36,18 +36,18 @@ struct backlight_device;
57146 struct fb_info;
57147
57148 struct backlight_ops {
57149- unsigned int options;
57150+ const unsigned int options;
57151
57152 #define BL_CORE_SUSPENDRESUME (1 << 0)
57153
57154 /* Notify the backlight driver some property has changed */
57155- int (*update_status)(struct backlight_device *);
57156+ int (* const update_status)(struct backlight_device *);
57157 /* Return the current backlight brightness (accounting for power,
57158 fb_blank etc.) */
57159- int (*get_brightness)(struct backlight_device *);
57160+ int (* const get_brightness)(struct backlight_device *);
57161 /* Check if given framebuffer device is the one bound to this backlight;
57162 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57163- int (*check_fb)(struct fb_info *);
57164+ int (* const check_fb)(struct fb_info *);
57165 };
57166
57167 /* This structure defines all the properties of a backlight */
57168@@ -86,7 +86,7 @@ struct backlight_device {
57169 registered this device has been unloaded, and if class_get_devdata()
57170 points to something in the body of that driver, it is also invalid. */
57171 struct mutex ops_lock;
57172- struct backlight_ops *ops;
57173+ const struct backlight_ops *ops;
57174
57175 /* The framebuffer notifier block */
57176 struct notifier_block fb_notif;
57177@@ -103,7 +103,7 @@ static inline void backlight_update_stat
57178 }
57179
57180 extern struct backlight_device *backlight_device_register(const char *name,
57181- struct device *dev, void *devdata, struct backlight_ops *ops);
57182+ struct device *dev, void *devdata, const struct backlight_ops *ops);
57183 extern void backlight_device_unregister(struct backlight_device *bd);
57184 extern void backlight_force_update(struct backlight_device *bd,
57185 enum backlight_update_reason reason);
57186diff -urNp linux-2.6.32.46/include/linux/binfmts.h linux-2.6.32.46/include/linux/binfmts.h
57187--- linux-2.6.32.46/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57188+++ linux-2.6.32.46/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57189@@ -83,6 +83,7 @@ struct linux_binfmt {
57190 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57191 int (*load_shlib)(struct file *);
57192 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57193+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57194 unsigned long min_coredump; /* minimal dump size */
57195 int hasvdso;
57196 };
57197diff -urNp linux-2.6.32.46/include/linux/blkdev.h linux-2.6.32.46/include/linux/blkdev.h
57198--- linux-2.6.32.46/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57199+++ linux-2.6.32.46/include/linux/blkdev.h 2011-08-26 20:27:21.000000000 -0400
57200@@ -1278,7 +1278,7 @@ struct block_device_operations {
57201 int (*revalidate_disk) (struct gendisk *);
57202 int (*getgeo)(struct block_device *, struct hd_geometry *);
57203 struct module *owner;
57204-};
57205+} __do_const;
57206
57207 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57208 unsigned long);
57209diff -urNp linux-2.6.32.46/include/linux/blktrace_api.h linux-2.6.32.46/include/linux/blktrace_api.h
57210--- linux-2.6.32.46/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57211+++ linux-2.6.32.46/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57212@@ -160,7 +160,7 @@ struct blk_trace {
57213 struct dentry *dir;
57214 struct dentry *dropped_file;
57215 struct dentry *msg_file;
57216- atomic_t dropped;
57217+ atomic_unchecked_t dropped;
57218 };
57219
57220 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57221diff -urNp linux-2.6.32.46/include/linux/byteorder/little_endian.h linux-2.6.32.46/include/linux/byteorder/little_endian.h
57222--- linux-2.6.32.46/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57223+++ linux-2.6.32.46/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57224@@ -42,51 +42,51 @@
57225
57226 static inline __le64 __cpu_to_le64p(const __u64 *p)
57227 {
57228- return (__force __le64)*p;
57229+ return (__force const __le64)*p;
57230 }
57231 static inline __u64 __le64_to_cpup(const __le64 *p)
57232 {
57233- return (__force __u64)*p;
57234+ return (__force const __u64)*p;
57235 }
57236 static inline __le32 __cpu_to_le32p(const __u32 *p)
57237 {
57238- return (__force __le32)*p;
57239+ return (__force const __le32)*p;
57240 }
57241 static inline __u32 __le32_to_cpup(const __le32 *p)
57242 {
57243- return (__force __u32)*p;
57244+ return (__force const __u32)*p;
57245 }
57246 static inline __le16 __cpu_to_le16p(const __u16 *p)
57247 {
57248- return (__force __le16)*p;
57249+ return (__force const __le16)*p;
57250 }
57251 static inline __u16 __le16_to_cpup(const __le16 *p)
57252 {
57253- return (__force __u16)*p;
57254+ return (__force const __u16)*p;
57255 }
57256 static inline __be64 __cpu_to_be64p(const __u64 *p)
57257 {
57258- return (__force __be64)__swab64p(p);
57259+ return (__force const __be64)__swab64p(p);
57260 }
57261 static inline __u64 __be64_to_cpup(const __be64 *p)
57262 {
57263- return __swab64p((__u64 *)p);
57264+ return __swab64p((const __u64 *)p);
57265 }
57266 static inline __be32 __cpu_to_be32p(const __u32 *p)
57267 {
57268- return (__force __be32)__swab32p(p);
57269+ return (__force const __be32)__swab32p(p);
57270 }
57271 static inline __u32 __be32_to_cpup(const __be32 *p)
57272 {
57273- return __swab32p((__u32 *)p);
57274+ return __swab32p((const __u32 *)p);
57275 }
57276 static inline __be16 __cpu_to_be16p(const __u16 *p)
57277 {
57278- return (__force __be16)__swab16p(p);
57279+ return (__force const __be16)__swab16p(p);
57280 }
57281 static inline __u16 __be16_to_cpup(const __be16 *p)
57282 {
57283- return __swab16p((__u16 *)p);
57284+ return __swab16p((const __u16 *)p);
57285 }
57286 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57287 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57288diff -urNp linux-2.6.32.46/include/linux/cache.h linux-2.6.32.46/include/linux/cache.h
57289--- linux-2.6.32.46/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57290+++ linux-2.6.32.46/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57291@@ -16,6 +16,10 @@
57292 #define __read_mostly
57293 #endif
57294
57295+#ifndef __read_only
57296+#define __read_only __read_mostly
57297+#endif
57298+
57299 #ifndef ____cacheline_aligned
57300 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57301 #endif
57302diff -urNp linux-2.6.32.46/include/linux/capability.h linux-2.6.32.46/include/linux/capability.h
57303--- linux-2.6.32.46/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57304+++ linux-2.6.32.46/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57305@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57306 (security_real_capable_noaudit((t), (cap)) == 0)
57307
57308 extern int capable(int cap);
57309+int capable_nolog(int cap);
57310
57311 /* audit system wants to get cap info from files as well */
57312 struct dentry;
57313diff -urNp linux-2.6.32.46/include/linux/compiler-gcc4.h linux-2.6.32.46/include/linux/compiler-gcc4.h
57314--- linux-2.6.32.46/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57315+++ linux-2.6.32.46/include/linux/compiler-gcc4.h 2011-08-26 20:19:09.000000000 -0400
57316@@ -36,4 +36,16 @@
57317 the kernel context */
57318 #define __cold __attribute__((__cold__))
57319
57320+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57321+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57322+#define __bos0(ptr) __bos((ptr), 0)
57323+#define __bos1(ptr) __bos((ptr), 1)
57324+
57325+#if __GNUC_MINOR__ >= 5
57326+#ifdef CONSTIFY_PLUGIN
57327+#define __no_const __attribute__((no_const))
57328+#define __do_const __attribute__((do_const))
57329+#endif
57330+#endif
57331+
57332 #endif
57333diff -urNp linux-2.6.32.46/include/linux/compiler.h linux-2.6.32.46/include/linux/compiler.h
57334--- linux-2.6.32.46/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57335+++ linux-2.6.32.46/include/linux/compiler.h 2011-08-26 20:19:09.000000000 -0400
57336@@ -247,6 +247,14 @@ void ftrace_likely_update(struct ftrace_
57337 # define __attribute_const__ /* unimplemented */
57338 #endif
57339
57340+#ifndef __no_const
57341+# define __no_const
57342+#endif
57343+
57344+#ifndef __do_const
57345+# define __do_const
57346+#endif
57347+
57348 /*
57349 * Tell gcc if a function is cold. The compiler will assume any path
57350 * directly leading to the call is unlikely.
57351@@ -256,6 +264,22 @@ void ftrace_likely_update(struct ftrace_
57352 #define __cold
57353 #endif
57354
57355+#ifndef __alloc_size
57356+#define __alloc_size(...)
57357+#endif
57358+
57359+#ifndef __bos
57360+#define __bos(ptr, arg)
57361+#endif
57362+
57363+#ifndef __bos0
57364+#define __bos0(ptr)
57365+#endif
57366+
57367+#ifndef __bos1
57368+#define __bos1(ptr)
57369+#endif
57370+
57371 /* Simple shorthand for a section definition */
57372 #ifndef __section
57373 # define __section(S) __attribute__ ((__section__(#S)))
57374@@ -278,6 +302,7 @@ void ftrace_likely_update(struct ftrace_
57375 * use is to mediate communication between process-level code and irq/NMI
57376 * handlers, all running on the same CPU.
57377 */
57378-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57379+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57380+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57381
57382 #endif /* __LINUX_COMPILER_H */
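The compiler.h hunk splits ACCESS_ONCE() into a read-only form (casting through a volatile const pointer) and an explicit ACCESS_ONCE_RW() for writes, so accidental stores through the plain macro become compile errors. A small user-space sketch, assuming GCC-style __typeof__:

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int flag = 0;

        int seen = ACCESS_ONCE(flag);   /* read-only snapshot: fine             */
        ACCESS_ONCE_RW(flag) = 1;       /* writes must use the _RW form         */
        /* ACCESS_ONCE(flag) = 1;          would now fail: assignment to const  */

        printf("%d %d\n", seen, flag);
        return 0;
    }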
57383diff -urNp linux-2.6.32.46/include/linux/crypto.h linux-2.6.32.46/include/linux/crypto.h
57384--- linux-2.6.32.46/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57385+++ linux-2.6.32.46/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57386@@ -394,7 +394,7 @@ struct cipher_tfm {
57387 const u8 *key, unsigned int keylen);
57388 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57389 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57390-};
57391+} __no_const;
57392
57393 struct hash_tfm {
57394 int (*init)(struct hash_desc *desc);
57395@@ -415,13 +415,13 @@ struct compress_tfm {
57396 int (*cot_decompress)(struct crypto_tfm *tfm,
57397 const u8 *src, unsigned int slen,
57398 u8 *dst, unsigned int *dlen);
57399-};
57400+} __no_const;
57401
57402 struct rng_tfm {
57403 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57404 unsigned int dlen);
57405 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57406-};
57407+} __no_const;
57408
57409 #define crt_ablkcipher crt_u.ablkcipher
57410 #define crt_aead crt_u.aead
57411diff -urNp linux-2.6.32.46/include/linux/dcache.h linux-2.6.32.46/include/linux/dcache.h
57412--- linux-2.6.32.46/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57413+++ linux-2.6.32.46/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57414@@ -119,6 +119,8 @@ struct dentry {
57415 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57416 };
57417
57418+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57419+
57420 /*
57421 * dentry->d_lock spinlock nesting subclasses:
57422 *
57423diff -urNp linux-2.6.32.46/include/linux/decompress/mm.h linux-2.6.32.46/include/linux/decompress/mm.h
57424--- linux-2.6.32.46/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57425+++ linux-2.6.32.46/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57426@@ -78,7 +78,7 @@ static void free(void *where)
57427 * warnings when not needed (indeed large_malloc / large_free are not
57428 * needed by inflate */
57429
57430-#define malloc(a) kmalloc(a, GFP_KERNEL)
57431+#define malloc(a) kmalloc((a), GFP_KERNEL)
57432 #define free(a) kfree(a)
57433
57434 #define large_malloc(a) vmalloc(a)
57435diff -urNp linux-2.6.32.46/include/linux/dma-mapping.h linux-2.6.32.46/include/linux/dma-mapping.h
57436--- linux-2.6.32.46/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57437+++ linux-2.6.32.46/include/linux/dma-mapping.h 2011-08-26 20:19:09.000000000 -0400
57438@@ -16,51 +16,51 @@ enum dma_data_direction {
57439 };
57440
57441 struct dma_map_ops {
57442- void* (*alloc_coherent)(struct device *dev, size_t size,
57443+ void* (* const alloc_coherent)(struct device *dev, size_t size,
57444 dma_addr_t *dma_handle, gfp_t gfp);
57445- void (*free_coherent)(struct device *dev, size_t size,
57446+ void (* const free_coherent)(struct device *dev, size_t size,
57447 void *vaddr, dma_addr_t dma_handle);
57448- dma_addr_t (*map_page)(struct device *dev, struct page *page,
57449+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57450 unsigned long offset, size_t size,
57451 enum dma_data_direction dir,
57452 struct dma_attrs *attrs);
57453- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57454+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57455 size_t size, enum dma_data_direction dir,
57456 struct dma_attrs *attrs);
57457- int (*map_sg)(struct device *dev, struct scatterlist *sg,
57458+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57459 int nents, enum dma_data_direction dir,
57460 struct dma_attrs *attrs);
57461- void (*unmap_sg)(struct device *dev,
57462+ void (* const unmap_sg)(struct device *dev,
57463 struct scatterlist *sg, int nents,
57464 enum dma_data_direction dir,
57465 struct dma_attrs *attrs);
57466- void (*sync_single_for_cpu)(struct device *dev,
57467+ void (* const sync_single_for_cpu)(struct device *dev,
57468 dma_addr_t dma_handle, size_t size,
57469 enum dma_data_direction dir);
57470- void (*sync_single_for_device)(struct device *dev,
57471+ void (* const sync_single_for_device)(struct device *dev,
57472 dma_addr_t dma_handle, size_t size,
57473 enum dma_data_direction dir);
57474- void (*sync_single_range_for_cpu)(struct device *dev,
57475+ void (* const sync_single_range_for_cpu)(struct device *dev,
57476 dma_addr_t dma_handle,
57477 unsigned long offset,
57478 size_t size,
57479 enum dma_data_direction dir);
57480- void (*sync_single_range_for_device)(struct device *dev,
57481+ void (* const sync_single_range_for_device)(struct device *dev,
57482 dma_addr_t dma_handle,
57483 unsigned long offset,
57484 size_t size,
57485 enum dma_data_direction dir);
57486- void (*sync_sg_for_cpu)(struct device *dev,
57487+ void (* const sync_sg_for_cpu)(struct device *dev,
57488 struct scatterlist *sg, int nents,
57489 enum dma_data_direction dir);
57490- void (*sync_sg_for_device)(struct device *dev,
57491+ void (* const sync_sg_for_device)(struct device *dev,
57492 struct scatterlist *sg, int nents,
57493 enum dma_data_direction dir);
57494- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57495- int (*dma_supported)(struct device *dev, u64 mask);
57496+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57497+ int (* const dma_supported)(struct device *dev, u64 mask);
57498 int (*set_dma_mask)(struct device *dev, u64 mask);
57499 int is_phys;
57500-};
57501+} __do_const;
57502
57503 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
57504
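Here the dma_map_ops members are turned into const function pointers and the whole structure is tagged __do_const, matching the constification of backlight_ops, super_operations and the other method tables in this section. The point is that once the hooks are const the compiler refuses any later retargeting of them; a minimal sketch with made-up names:

    struct ops_rw    { int (*probe)(void); };
    struct ops_const { int (* const probe)(void); };

    static int real_probe(void) { return 0; }

    int main(void)
    {
        struct ops_rw    a = { real_probe };
        struct ops_const b = { real_probe };

        a.probe = 0;        /* still possible with the unpatched layout        */
        /* b.probe = 0;        rejected: assignment of read-only member        */

        return (a.probe == 0) ? b.probe() : 1;   /* returns 0 */
    }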
57505diff -urNp linux-2.6.32.46/include/linux/dst.h linux-2.6.32.46/include/linux/dst.h
57506--- linux-2.6.32.46/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57507+++ linux-2.6.32.46/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57508@@ -380,7 +380,7 @@ struct dst_node
57509 struct thread_pool *pool;
57510
57511 /* Transaction IDs live here */
57512- atomic_long_t gen;
57513+ atomic_long_unchecked_t gen;
57514
57515 /*
57516 * How frequently and how many times transaction
57517diff -urNp linux-2.6.32.46/include/linux/elf.h linux-2.6.32.46/include/linux/elf.h
57518--- linux-2.6.32.46/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57519+++ linux-2.6.32.46/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57520@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57521 #define PT_GNU_EH_FRAME 0x6474e550
57522
57523 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57524+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57525+
57526+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57527+
57528+/* Constants for the e_flags field */
57529+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57530+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57531+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57532+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57533+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57534+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57535
57536 /* These constants define the different elf file types */
57537 #define ET_NONE 0
57538@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57539 #define DT_DEBUG 21
57540 #define DT_TEXTREL 22
57541 #define DT_JMPREL 23
57542+#define DT_FLAGS 30
57543+ #define DF_TEXTREL 0x00000004
57544 #define DT_ENCODING 32
57545 #define OLD_DT_LOOS 0x60000000
57546 #define DT_LOOS 0x6000000d
57547@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
57548 #define PF_W 0x2
57549 #define PF_X 0x1
57550
57551+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
57552+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
57553+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
57554+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
57555+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
57556+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
57557+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
57558+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
57559+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
57560+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
57561+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
57562+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
57563+
57564 typedef struct elf32_phdr{
57565 Elf32_Word p_type;
57566 Elf32_Off p_offset;
57567@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
57568 #define EI_OSABI 7
57569 #define EI_PAD 8
57570
57571+#define EI_PAX 14
57572+
57573 #define ELFMAG0 0x7f /* EI_MAG */
57574 #define ELFMAG1 'E'
57575 #define ELFMAG2 'L'
57576@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
57577 #define elf_phdr elf32_phdr
57578 #define elf_note elf32_note
57579 #define elf_addr_t Elf32_Off
57580+#define elf_dyn Elf32_Dyn
57581
57582 #else
57583
57584@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
57585 #define elf_phdr elf64_phdr
57586 #define elf_note elf64_note
57587 #define elf_addr_t Elf64_Off
57588+#define elf_dyn Elf64_Dyn
57589
57590 #endif
57591
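The elf.h additions define PT_PAX_FLAGS plus the PF_*/PF_NO* bit pairs that record the per-binary PaX policy in a program header. A rough user-space sketch of reading those bits out of a p_flags value; the bit values are copied from the hunk above, everything else (ELF parsing, precedence handling) is omitted or simplified:

    #include <elf.h>
    #include <stdio.h>

    #define PF_PAGEEXEC    (1U << 4)
    #define PF_NOPAGEEXEC  (1U << 5)
    #define PF_MPROTECT    (1U << 8)
    #define PF_NOMPROTECT  (1U << 9)

    static const char *state(Elf64_Word flags, Elf64_Word on, Elf64_Word off)
    {
        /* if both bits were set the kernel parser decides; "disabled" wins
           here purely to keep the example simple */
        if (flags & off)
            return "disabled";
        if (flags & on)
            return "enabled";
        return "default";               /* neither bit set: kernel default applies */
    }

    int main(void)
    {
        Elf64_Word p_flags = PF_PAGEEXEC | PF_MPROTECT;   /* example value */

        printf("PAGEEXEC: %s\n", state(p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC));
        printf("MPROTECT: %s\n", state(p_flags, PF_MPROTECT, PF_NOMPROTECT));
        return 0;
    }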
57592diff -urNp linux-2.6.32.46/include/linux/fscache-cache.h linux-2.6.32.46/include/linux/fscache-cache.h
57593--- linux-2.6.32.46/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
57594+++ linux-2.6.32.46/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
57595@@ -116,7 +116,7 @@ struct fscache_operation {
57596 #endif
57597 };
57598
57599-extern atomic_t fscache_op_debug_id;
57600+extern atomic_unchecked_t fscache_op_debug_id;
57601 extern const struct slow_work_ops fscache_op_slow_work_ops;
57602
57603 extern void fscache_enqueue_operation(struct fscache_operation *);
57604@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
57605 fscache_operation_release_t release)
57606 {
57607 atomic_set(&op->usage, 1);
57608- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57609+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57610 op->release = release;
57611 INIT_LIST_HEAD(&op->pend_link);
57612 fscache_set_op_state(op, "Init");
57613diff -urNp linux-2.6.32.46/include/linux/fs.h linux-2.6.32.46/include/linux/fs.h
57614--- linux-2.6.32.46/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
57615+++ linux-2.6.32.46/include/linux/fs.h 2011-08-26 20:19:09.000000000 -0400
57616@@ -90,6 +90,11 @@ struct inodes_stat_t {
57617 /* Expect random access pattern */
57618 #define FMODE_RANDOM ((__force fmode_t)4096)
57619
57620+/* Hack for grsec so as not to require read permission simply to execute
57621+ * a binary
57622+ */
57623+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
57624+
57625 /*
57626 * The below are the various read and write types that we support. Some of
57627 * them include behavioral modifiers that send information down to the
57628@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
57629 unsigned long, unsigned long);
57630
57631 struct address_space_operations {
57632- int (*writepage)(struct page *page, struct writeback_control *wbc);
57633- int (*readpage)(struct file *, struct page *);
57634- void (*sync_page)(struct page *);
57635+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
57636+ int (* const readpage)(struct file *, struct page *);
57637+ void (* const sync_page)(struct page *);
57638
57639 /* Write back some dirty pages from this mapping. */
57640- int (*writepages)(struct address_space *, struct writeback_control *);
57641+ int (* const writepages)(struct address_space *, struct writeback_control *);
57642
57643 /* Set a page dirty. Return true if this dirtied it */
57644- int (*set_page_dirty)(struct page *page);
57645+ int (* const set_page_dirty)(struct page *page);
57646
57647- int (*readpages)(struct file *filp, struct address_space *mapping,
57648+ int (* const readpages)(struct file *filp, struct address_space *mapping,
57649 struct list_head *pages, unsigned nr_pages);
57650
57651- int (*write_begin)(struct file *, struct address_space *mapping,
57652+ int (* const write_begin)(struct file *, struct address_space *mapping,
57653 loff_t pos, unsigned len, unsigned flags,
57654 struct page **pagep, void **fsdata);
57655- int (*write_end)(struct file *, struct address_space *mapping,
57656+ int (* const write_end)(struct file *, struct address_space *mapping,
57657 loff_t pos, unsigned len, unsigned copied,
57658 struct page *page, void *fsdata);
57659
57660 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
57661- sector_t (*bmap)(struct address_space *, sector_t);
57662- void (*invalidatepage) (struct page *, unsigned long);
57663- int (*releasepage) (struct page *, gfp_t);
57664- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
57665+ sector_t (* const bmap)(struct address_space *, sector_t);
57666+ void (* const invalidatepage) (struct page *, unsigned long);
57667+ int (* const releasepage) (struct page *, gfp_t);
57668+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
57669 loff_t offset, unsigned long nr_segs);
57670- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
57671+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
57672 void **, unsigned long *);
57673 /* migrate the contents of a page to the specified target */
57674- int (*migratepage) (struct address_space *,
57675+ int (* const migratepage) (struct address_space *,
57676 struct page *, struct page *);
57677- int (*launder_page) (struct page *);
57678- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
57679+ int (* const launder_page) (struct page *);
57680+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
57681 unsigned long);
57682- int (*error_remove_page)(struct address_space *, struct page *);
57683+ int (* const error_remove_page)(struct address_space *, struct page *);
57684 };
57685
57686 /*
57687@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
57688 typedef struct files_struct *fl_owner_t;
57689
57690 struct file_lock_operations {
57691- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57692- void (*fl_release_private)(struct file_lock *);
57693+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57694+ void (* const fl_release_private)(struct file_lock *);
57695 };
57696
57697 struct lock_manager_operations {
57698- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
57699- void (*fl_notify)(struct file_lock *); /* unblock callback */
57700- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
57701- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
57702- void (*fl_release_private)(struct file_lock *);
57703- void (*fl_break)(struct file_lock *);
57704- int (*fl_mylease)(struct file_lock *, struct file_lock *);
57705- int (*fl_change)(struct file_lock **, int);
57706+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
57707+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
57708+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
57709+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
57710+ void (* const fl_release_private)(struct file_lock *);
57711+ void (* const fl_break)(struct file_lock *);
57712+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
57713+ int (* const fl_change)(struct file_lock **, int);
57714 };
57715
57716 struct lock_manager {
57717@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
57718 unsigned int fi_flags; /* Flags as passed from user */
57719 unsigned int fi_extents_mapped; /* Number of mapped extents */
57720 unsigned int fi_extents_max; /* Size of fiemap_extent array */
57721- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
57722+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
57723 * array */
57724 };
57725 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
57726@@ -1512,7 +1517,8 @@ struct file_operations {
57727 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
57728 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
57729 int (*setlease)(struct file *, long, struct file_lock **);
57730-};
57731+} __do_const;
57732+typedef struct file_operations __no_const file_operations_no_const;
57733
57734 struct inode_operations {
57735 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
57736@@ -1559,30 +1565,30 @@ extern ssize_t vfs_writev(struct file *,
57737 unsigned long, loff_t *);
57738
57739 struct super_operations {
57740- struct inode *(*alloc_inode)(struct super_block *sb);
57741- void (*destroy_inode)(struct inode *);
57742+ struct inode *(* const alloc_inode)(struct super_block *sb);
57743+ void (* const destroy_inode)(struct inode *);
57744
57745- void (*dirty_inode) (struct inode *);
57746- int (*write_inode) (struct inode *, int);
57747- void (*drop_inode) (struct inode *);
57748- void (*delete_inode) (struct inode *);
57749- void (*put_super) (struct super_block *);
57750- void (*write_super) (struct super_block *);
57751- int (*sync_fs)(struct super_block *sb, int wait);
57752- int (*freeze_fs) (struct super_block *);
57753- int (*unfreeze_fs) (struct super_block *);
57754- int (*statfs) (struct dentry *, struct kstatfs *);
57755- int (*remount_fs) (struct super_block *, int *, char *);
57756- void (*clear_inode) (struct inode *);
57757- void (*umount_begin) (struct super_block *);
57758+ void (* const dirty_inode) (struct inode *);
57759+ int (* const write_inode) (struct inode *, int);
57760+ void (* const drop_inode) (struct inode *);
57761+ void (* const delete_inode) (struct inode *);
57762+ void (* const put_super) (struct super_block *);
57763+ void (* const write_super) (struct super_block *);
57764+ int (* const sync_fs)(struct super_block *sb, int wait);
57765+ int (* const freeze_fs) (struct super_block *);
57766+ int (* const unfreeze_fs) (struct super_block *);
57767+ int (* const statfs) (struct dentry *, struct kstatfs *);
57768+ int (* const remount_fs) (struct super_block *, int *, char *);
57769+ void (* const clear_inode) (struct inode *);
57770+ void (* const umount_begin) (struct super_block *);
57771
57772- int (*show_options)(struct seq_file *, struct vfsmount *);
57773- int (*show_stats)(struct seq_file *, struct vfsmount *);
57774+ int (* const show_options)(struct seq_file *, struct vfsmount *);
57775+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
57776 #ifdef CONFIG_QUOTA
57777- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
57778- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57779+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
57780+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
57781 #endif
57782- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57783+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
57784 };
57785
57786 /*
57787diff -urNp linux-2.6.32.46/include/linux/fs_struct.h linux-2.6.32.46/include/linux/fs_struct.h
57788--- linux-2.6.32.46/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
57789+++ linux-2.6.32.46/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
57790@@ -4,7 +4,7 @@
57791 #include <linux/path.h>
57792
57793 struct fs_struct {
57794- int users;
57795+ atomic_t users;
57796 rwlock_t lock;
57797 int umask;
57798 int in_exec;
57799diff -urNp linux-2.6.32.46/include/linux/ftrace_event.h linux-2.6.32.46/include/linux/ftrace_event.h
57800--- linux-2.6.32.46/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
57801+++ linux-2.6.32.46/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
57802@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
57803 int filter_type);
57804 extern int trace_define_common_fields(struct ftrace_event_call *call);
57805
57806-#define is_signed_type(type) (((type)(-1)) < 0)
57807+#define is_signed_type(type) (((type)(-1)) < (type)1)
57808
57809 int trace_set_clr_event(const char *system, const char *event, int set);
57810
57811diff -urNp linux-2.6.32.46/include/linux/genhd.h linux-2.6.32.46/include/linux/genhd.h
57812--- linux-2.6.32.46/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
57813+++ linux-2.6.32.46/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
57814@@ -161,7 +161,7 @@ struct gendisk {
57815
57816 struct timer_rand_state *random;
57817
57818- atomic_t sync_io; /* RAID */
57819+ atomic_unchecked_t sync_io; /* RAID */
57820 struct work_struct async_notify;
57821 #ifdef CONFIG_BLK_DEV_INTEGRITY
57822 struct blk_integrity *integrity;
57823diff -urNp linux-2.6.32.46/include/linux/gracl.h linux-2.6.32.46/include/linux/gracl.h
57824--- linux-2.6.32.46/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
57825+++ linux-2.6.32.46/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
57826@@ -0,0 +1,317 @@
57827+#ifndef GR_ACL_H
57828+#define GR_ACL_H
57829+
57830+#include <linux/grdefs.h>
57831+#include <linux/resource.h>
57832+#include <linux/capability.h>
57833+#include <linux/dcache.h>
57834+#include <asm/resource.h>
57835+
57836+/* Major status information */
57837+
57838+#define GR_VERSION "grsecurity 2.2.2"
57839+#define GRSECURITY_VERSION 0x2202
57840+
57841+enum {
57842+ GR_SHUTDOWN = 0,
57843+ GR_ENABLE = 1,
57844+ GR_SPROLE = 2,
57845+ GR_RELOAD = 3,
57846+ GR_SEGVMOD = 4,
57847+ GR_STATUS = 5,
57848+ GR_UNSPROLE = 6,
57849+ GR_PASSSET = 7,
57850+ GR_SPROLEPAM = 8,
57851+};
57852+
57853+/* Password setup definitions
57854+ * kernel/grhash.c */
57855+enum {
57856+ GR_PW_LEN = 128,
57857+ GR_SALT_LEN = 16,
57858+ GR_SHA_LEN = 32,
57859+};
57860+
57861+enum {
57862+ GR_SPROLE_LEN = 64,
57863+};
57864+
57865+enum {
57866+ GR_NO_GLOB = 0,
57867+ GR_REG_GLOB,
57868+ GR_CREATE_GLOB
57869+};
57870+
57871+#define GR_NLIMITS 32
57872+
57873+/* Begin Data Structures */
57874+
57875+struct sprole_pw {
57876+ unsigned char *rolename;
57877+ unsigned char salt[GR_SALT_LEN];
57878+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
57879+};
57880+
57881+struct name_entry {
57882+ __u32 key;
57883+ ino_t inode;
57884+ dev_t device;
57885+ char *name;
57886+ __u16 len;
57887+ __u8 deleted;
57888+ struct name_entry *prev;
57889+ struct name_entry *next;
57890+};
57891+
57892+struct inodev_entry {
57893+ struct name_entry *nentry;
57894+ struct inodev_entry *prev;
57895+ struct inodev_entry *next;
57896+};
57897+
57898+struct acl_role_db {
57899+ struct acl_role_label **r_hash;
57900+ __u32 r_size;
57901+};
57902+
57903+struct inodev_db {
57904+ struct inodev_entry **i_hash;
57905+ __u32 i_size;
57906+};
57907+
57908+struct name_db {
57909+ struct name_entry **n_hash;
57910+ __u32 n_size;
57911+};
57912+
57913+struct crash_uid {
57914+ uid_t uid;
57915+ unsigned long expires;
57916+};
57917+
57918+struct gr_hash_struct {
57919+ void **table;
57920+ void **nametable;
57921+ void *first;
57922+ __u32 table_size;
57923+ __u32 used_size;
57924+ int type;
57925+};
57926+
57927+/* Userspace Grsecurity ACL data structures */
57928+
57929+struct acl_subject_label {
57930+ char *filename;
57931+ ino_t inode;
57932+ dev_t device;
57933+ __u32 mode;
57934+ kernel_cap_t cap_mask;
57935+ kernel_cap_t cap_lower;
57936+ kernel_cap_t cap_invert_audit;
57937+
57938+ struct rlimit res[GR_NLIMITS];
57939+ __u32 resmask;
57940+
57941+ __u8 user_trans_type;
57942+ __u8 group_trans_type;
57943+ uid_t *user_transitions;
57944+ gid_t *group_transitions;
57945+ __u16 user_trans_num;
57946+ __u16 group_trans_num;
57947+
57948+ __u32 sock_families[2];
57949+ __u32 ip_proto[8];
57950+ __u32 ip_type;
57951+ struct acl_ip_label **ips;
57952+ __u32 ip_num;
57953+ __u32 inaddr_any_override;
57954+
57955+ __u32 crashes;
57956+ unsigned long expires;
57957+
57958+ struct acl_subject_label *parent_subject;
57959+ struct gr_hash_struct *hash;
57960+ struct acl_subject_label *prev;
57961+ struct acl_subject_label *next;
57962+
57963+ struct acl_object_label **obj_hash;
57964+ __u32 obj_hash_size;
57965+ __u16 pax_flags;
57966+};
57967+
57968+struct role_allowed_ip {
57969+ __u32 addr;
57970+ __u32 netmask;
57971+
57972+ struct role_allowed_ip *prev;
57973+ struct role_allowed_ip *next;
57974+};
57975+
57976+struct role_transition {
57977+ char *rolename;
57978+
57979+ struct role_transition *prev;
57980+ struct role_transition *next;
57981+};
57982+
57983+struct acl_role_label {
57984+ char *rolename;
57985+ uid_t uidgid;
57986+ __u16 roletype;
57987+
57988+ __u16 auth_attempts;
57989+ unsigned long expires;
57990+
57991+ struct acl_subject_label *root_label;
57992+ struct gr_hash_struct *hash;
57993+
57994+ struct acl_role_label *prev;
57995+ struct acl_role_label *next;
57996+
57997+ struct role_transition *transitions;
57998+ struct role_allowed_ip *allowed_ips;
57999+ uid_t *domain_children;
58000+ __u16 domain_child_num;
58001+
58002+ struct acl_subject_label **subj_hash;
58003+ __u32 subj_hash_size;
58004+};
58005+
58006+struct user_acl_role_db {
58007+ struct acl_role_label **r_table;
58008+ __u32 num_pointers; /* Number of allocations to track */
58009+ __u32 num_roles; /* Number of roles */
58010+ __u32 num_domain_children; /* Number of domain children */
58011+ __u32 num_subjects; /* Number of subjects */
58012+ __u32 num_objects; /* Number of objects */
58013+};
58014+
58015+struct acl_object_label {
58016+ char *filename;
58017+ ino_t inode;
58018+ dev_t device;
58019+ __u32 mode;
58020+
58021+ struct acl_subject_label *nested;
58022+ struct acl_object_label *globbed;
58023+
58024+ /* next two structures not used */
58025+
58026+ struct acl_object_label *prev;
58027+ struct acl_object_label *next;
58028+};
58029+
58030+struct acl_ip_label {
58031+ char *iface;
58032+ __u32 addr;
58033+ __u32 netmask;
58034+ __u16 low, high;
58035+ __u8 mode;
58036+ __u32 type;
58037+ __u32 proto[8];
58038+
58039+ /* next two structures not used */
58040+
58041+ struct acl_ip_label *prev;
58042+ struct acl_ip_label *next;
58043+};
58044+
58045+struct gr_arg {
58046+ struct user_acl_role_db role_db;
58047+ unsigned char pw[GR_PW_LEN];
58048+ unsigned char salt[GR_SALT_LEN];
58049+ unsigned char sum[GR_SHA_LEN];
58050+ unsigned char sp_role[GR_SPROLE_LEN];
58051+ struct sprole_pw *sprole_pws;
58052+ dev_t segv_device;
58053+ ino_t segv_inode;
58054+ uid_t segv_uid;
58055+ __u16 num_sprole_pws;
58056+ __u16 mode;
58057+};
58058+
58059+struct gr_arg_wrapper {
58060+ struct gr_arg *arg;
58061+ __u32 version;
58062+ __u32 size;
58063+};
58064+
58065+struct subject_map {
58066+ struct acl_subject_label *user;
58067+ struct acl_subject_label *kernel;
58068+ struct subject_map *prev;
58069+ struct subject_map *next;
58070+};
58071+
58072+struct acl_subj_map_db {
58073+ struct subject_map **s_hash;
58074+ __u32 s_size;
58075+};
58076+
58077+/* End Data Structures Section */
58078+
58079+/* Hash functions generated by empirical testing by Brad Spengler
58080+ Makes good use of the low bits of the inode. Generally 0-1 times
58081+ in loop for successful match. 0-3 for unsuccessful match.
58082+ Shift/add algorithm with modulus of table size and an XOR*/
58083+
58084+static __inline__ unsigned int
58085+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58086+{
58087+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58088+}
58089+
58090+ static __inline__ unsigned int
58091+shash(const struct acl_subject_label *userp, const unsigned int sz)
58092+{
58093+ return ((const unsigned long)userp % sz);
58094+}
58095+
58096+static __inline__ unsigned int
58097+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58098+{
58099+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58100+}
58101+
58102+static __inline__ unsigned int
58103+nhash(const char *name, const __u16 len, const unsigned int sz)
58104+{
58105+ return full_name_hash((const unsigned char *)name, len) % sz;
58106+}
58107+
58108+#define FOR_EACH_ROLE_START(role) \
58109+ role = role_list; \
58110+ while (role) {
58111+
58112+#define FOR_EACH_ROLE_END(role) \
58113+ role = role->prev; \
58114+ }
58115+
58116+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58117+ subj = NULL; \
58118+ iter = 0; \
58119+ while (iter < role->subj_hash_size) { \
58120+ if (subj == NULL) \
58121+ subj = role->subj_hash[iter]; \
58122+ if (subj == NULL) { \
58123+ iter++; \
58124+ continue; \
58125+ }
58126+
58127+#define FOR_EACH_SUBJECT_END(subj,iter) \
58128+ subj = subj->next; \
58129+ if (subj == NULL) \
58130+ iter++; \
58131+ }
58132+
58133+
58134+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58135+ subj = role->hash->first; \
58136+ while (subj != NULL) {
58137+
58138+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58139+ subj = subj->next; \
58140+ }
58141+
58142+#endif
58143+
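gracl.h above ends with the inline hash helpers (rhash, shash, fhash, nhash) that pick buckets in the role, subject, object and name tables. The snippet below just exercises fhash() stand-alone, with its body taken from the header (parameter types simplified to unsigned long) and an arbitrary table size, to show how an inode/device pair maps to a bucket:

    #include <stdio.h>

    /* body taken from the fhash() helper in the hunk above */
    static unsigned int fhash(unsigned long ino, unsigned long dev, unsigned int sz)
    {
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
    }

    int main(void)
    {
        /* arbitrary inode/device numbers, 257-entry table: prints the bucket index */
        printf("bucket = %u\n", fhash(131076UL, 2049UL, 257U));
        return 0;
    }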
58144diff -urNp linux-2.6.32.46/include/linux/gralloc.h linux-2.6.32.46/include/linux/gralloc.h
58145--- linux-2.6.32.46/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58146+++ linux-2.6.32.46/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58147@@ -0,0 +1,9 @@
58148+#ifndef __GRALLOC_H
58149+#define __GRALLOC_H
58150+
58151+void acl_free_all(void);
58152+int acl_alloc_stack_init(unsigned long size);
58153+void *acl_alloc(unsigned long len);
58154+void *acl_alloc_num(unsigned long num, unsigned long len);
58155+
58156+#endif
58157diff -urNp linux-2.6.32.46/include/linux/grdefs.h linux-2.6.32.46/include/linux/grdefs.h
58158--- linux-2.6.32.46/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58159+++ linux-2.6.32.46/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58160@@ -0,0 +1,140 @@
58161+#ifndef GRDEFS_H
58162+#define GRDEFS_H
58163+
58164+/* Begin grsecurity status declarations */
58165+
58166+enum {
58167+ GR_READY = 0x01,
58168+ GR_STATUS_INIT = 0x00 // disabled state
58169+};
58170+
58171+/* Begin ACL declarations */
58172+
58173+/* Role flags */
58174+
58175+enum {
58176+ GR_ROLE_USER = 0x0001,
58177+ GR_ROLE_GROUP = 0x0002,
58178+ GR_ROLE_DEFAULT = 0x0004,
58179+ GR_ROLE_SPECIAL = 0x0008,
58180+ GR_ROLE_AUTH = 0x0010,
58181+ GR_ROLE_NOPW = 0x0020,
58182+ GR_ROLE_GOD = 0x0040,
58183+ GR_ROLE_LEARN = 0x0080,
58184+ GR_ROLE_TPE = 0x0100,
58185+ GR_ROLE_DOMAIN = 0x0200,
58186+ GR_ROLE_PAM = 0x0400,
58187+ GR_ROLE_PERSIST = 0x800
58188+};
58189+
58190+/* ACL Subject and Object mode flags */
58191+enum {
58192+ GR_DELETED = 0x80000000
58193+};
58194+
58195+/* ACL Object-only mode flags */
58196+enum {
58197+ GR_READ = 0x00000001,
58198+ GR_APPEND = 0x00000002,
58199+ GR_WRITE = 0x00000004,
58200+ GR_EXEC = 0x00000008,
58201+ GR_FIND = 0x00000010,
58202+ GR_INHERIT = 0x00000020,
58203+ GR_SETID = 0x00000040,
58204+ GR_CREATE = 0x00000080,
58205+ GR_DELETE = 0x00000100,
58206+ GR_LINK = 0x00000200,
58207+ GR_AUDIT_READ = 0x00000400,
58208+ GR_AUDIT_APPEND = 0x00000800,
58209+ GR_AUDIT_WRITE = 0x00001000,
58210+ GR_AUDIT_EXEC = 0x00002000,
58211+ GR_AUDIT_FIND = 0x00004000,
58212+ GR_AUDIT_INHERIT= 0x00008000,
58213+ GR_AUDIT_SETID = 0x00010000,
58214+ GR_AUDIT_CREATE = 0x00020000,
58215+ GR_AUDIT_DELETE = 0x00040000,
58216+ GR_AUDIT_LINK = 0x00080000,
58217+ GR_PTRACERD = 0x00100000,
58218+ GR_NOPTRACE = 0x00200000,
58219+ GR_SUPPRESS = 0x00400000,
58220+ GR_NOLEARN = 0x00800000,
58221+ GR_INIT_TRANSFER= 0x01000000
58222+};
58223+
58224+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58225+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58226+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58227+
58228+/* ACL subject-only mode flags */
58229+enum {
58230+ GR_KILL = 0x00000001,
58231+ GR_VIEW = 0x00000002,
58232+ GR_PROTECTED = 0x00000004,
58233+ GR_LEARN = 0x00000008,
58234+ GR_OVERRIDE = 0x00000010,
58235+ /* just a placeholder, this mode is only used in userspace */
58236+ GR_DUMMY = 0x00000020,
58237+ GR_PROTSHM = 0x00000040,
58238+ GR_KILLPROC = 0x00000080,
58239+ GR_KILLIPPROC = 0x00000100,
58240+ /* just a placeholder, this mode is only used in userspace */
58241+ GR_NOTROJAN = 0x00000200,
58242+ GR_PROTPROCFD = 0x00000400,
58243+ GR_PROCACCT = 0x00000800,
58244+ GR_RELAXPTRACE = 0x00001000,
58245+ GR_NESTED = 0x00002000,
58246+ GR_INHERITLEARN = 0x00004000,
58247+ GR_PROCFIND = 0x00008000,
58248+ GR_POVERRIDE = 0x00010000,
58249+ GR_KERNELAUTH = 0x00020000,
58250+ GR_ATSECURE = 0x00040000,
58251+ GR_SHMEXEC = 0x00080000
58252+};
58253+
58254+enum {
58255+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58256+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58257+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58258+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58259+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58260+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58261+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58262+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58263+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58264+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58265+};
58266+
58267+enum {
58268+ GR_ID_USER = 0x01,
58269+ GR_ID_GROUP = 0x02,
58270+};
58271+
58272+enum {
58273+ GR_ID_ALLOW = 0x01,
58274+ GR_ID_DENY = 0x02,
58275+};
58276+
58277+#define GR_CRASH_RES 31
58278+#define GR_UIDTABLE_MAX 500
58279+
58280+/* begin resource learning section */
58281+enum {
58282+ GR_RLIM_CPU_BUMP = 60,
58283+ GR_RLIM_FSIZE_BUMP = 50000,
58284+ GR_RLIM_DATA_BUMP = 10000,
58285+ GR_RLIM_STACK_BUMP = 1000,
58286+ GR_RLIM_CORE_BUMP = 10000,
58287+ GR_RLIM_RSS_BUMP = 500000,
58288+ GR_RLIM_NPROC_BUMP = 1,
58289+ GR_RLIM_NOFILE_BUMP = 5,
58290+ GR_RLIM_MEMLOCK_BUMP = 50000,
58291+ GR_RLIM_AS_BUMP = 500000,
58292+ GR_RLIM_LOCKS_BUMP = 2,
58293+ GR_RLIM_SIGPENDING_BUMP = 5,
58294+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58295+ GR_RLIM_NICE_BUMP = 1,
58296+ GR_RLIM_RTPRIO_BUMP = 1,
58297+ GR_RLIM_RTTIME_BUMP = 1000000
58298+};
58299+
58300+#endif
58301diff -urNp linux-2.6.32.46/include/linux/grinternal.h linux-2.6.32.46/include/linux/grinternal.h
58302--- linux-2.6.32.46/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58303+++ linux-2.6.32.46/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58304@@ -0,0 +1,217 @@
58305+#ifndef __GRINTERNAL_H
58306+#define __GRINTERNAL_H
58307+
58308+#ifdef CONFIG_GRKERNSEC
58309+
58310+#include <linux/fs.h>
58311+#include <linux/mnt_namespace.h>
58312+#include <linux/nsproxy.h>
58313+#include <linux/gracl.h>
58314+#include <linux/grdefs.h>
58315+#include <linux/grmsg.h>
58316+
58317+void gr_add_learn_entry(const char *fmt, ...)
58318+ __attribute__ ((format (printf, 1, 2)));
58319+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58320+ const struct vfsmount *mnt);
58321+__u32 gr_check_create(const struct dentry *new_dentry,
58322+ const struct dentry *parent,
58323+ const struct vfsmount *mnt, const __u32 mode);
58324+int gr_check_protected_task(const struct task_struct *task);
58325+__u32 to_gr_audit(const __u32 reqmode);
58326+int gr_set_acls(const int type);
58327+int gr_apply_subject_to_task(struct task_struct *task);
58328+int gr_acl_is_enabled(void);
58329+char gr_roletype_to_char(void);
58330+
58331+void gr_handle_alertkill(struct task_struct *task);
58332+char *gr_to_filename(const struct dentry *dentry,
58333+ const struct vfsmount *mnt);
58334+char *gr_to_filename1(const struct dentry *dentry,
58335+ const struct vfsmount *mnt);
58336+char *gr_to_filename2(const struct dentry *dentry,
58337+ const struct vfsmount *mnt);
58338+char *gr_to_filename3(const struct dentry *dentry,
58339+ const struct vfsmount *mnt);
58340+
58341+extern int grsec_enable_harden_ptrace;
58342+extern int grsec_enable_link;
58343+extern int grsec_enable_fifo;
58344+extern int grsec_enable_shm;
58345+extern int grsec_enable_execlog;
58346+extern int grsec_enable_signal;
58347+extern int grsec_enable_audit_ptrace;
58348+extern int grsec_enable_forkfail;
58349+extern int grsec_enable_time;
58350+extern int grsec_enable_rofs;
58351+extern int grsec_enable_chroot_shmat;
58352+extern int grsec_enable_chroot_mount;
58353+extern int grsec_enable_chroot_double;
58354+extern int grsec_enable_chroot_pivot;
58355+extern int grsec_enable_chroot_chdir;
58356+extern int grsec_enable_chroot_chmod;
58357+extern int grsec_enable_chroot_mknod;
58358+extern int grsec_enable_chroot_fchdir;
58359+extern int grsec_enable_chroot_nice;
58360+extern int grsec_enable_chroot_execlog;
58361+extern int grsec_enable_chroot_caps;
58362+extern int grsec_enable_chroot_sysctl;
58363+extern int grsec_enable_chroot_unix;
58364+extern int grsec_enable_tpe;
58365+extern int grsec_tpe_gid;
58366+extern int grsec_enable_tpe_all;
58367+extern int grsec_enable_tpe_invert;
58368+extern int grsec_enable_socket_all;
58369+extern int grsec_socket_all_gid;
58370+extern int grsec_enable_socket_client;
58371+extern int grsec_socket_client_gid;
58372+extern int grsec_enable_socket_server;
58373+extern int grsec_socket_server_gid;
58374+extern int grsec_audit_gid;
58375+extern int grsec_enable_group;
58376+extern int grsec_enable_audit_textrel;
58377+extern int grsec_enable_log_rwxmaps;
58378+extern int grsec_enable_mount;
58379+extern int grsec_enable_chdir;
58380+extern int grsec_resource_logging;
58381+extern int grsec_enable_blackhole;
58382+extern int grsec_lastack_retries;
58383+extern int grsec_enable_brute;
58384+extern int grsec_lock;
58385+
58386+extern spinlock_t grsec_alert_lock;
58387+extern unsigned long grsec_alert_wtime;
58388+extern unsigned long grsec_alert_fyet;
58389+
58390+extern spinlock_t grsec_audit_lock;
58391+
58392+extern rwlock_t grsec_exec_file_lock;
58393+
58394+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58395+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58396+ (tsk)->exec_file->f_vfsmnt) : "/")
58397+
58398+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58399+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58400+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58401+
58402+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58403+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58404+ (tsk)->exec_file->f_vfsmnt) : "/")
58405+
58406+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58407+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58408+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58409+
58410+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58411+
58412+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58413+
58414+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58415+ (task)->pid, (cred)->uid, \
58416+ (cred)->euid, (cred)->gid, (cred)->egid, \
58417+ gr_parent_task_fullpath(task), \
58418+ (task)->real_parent->comm, (task)->real_parent->pid, \
58419+ (pcred)->uid, (pcred)->euid, \
58420+ (pcred)->gid, (pcred)->egid
58421+
58422+#define GR_CHROOT_CAPS {{ \
58423+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58424+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58425+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58426+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58427+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58428+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58429+
58430+#define security_learn(normal_msg,args...) \
58431+({ \
58432+ read_lock(&grsec_exec_file_lock); \
58433+ gr_add_learn_entry(normal_msg "\n", ## args); \
58434+ read_unlock(&grsec_exec_file_lock); \
58435+})
58436+
58437+enum {
58438+ GR_DO_AUDIT,
58439+ GR_DONT_AUDIT,
58440+ GR_DONT_AUDIT_GOOD
58441+};
58442+
58443+enum {
58444+ GR_TTYSNIFF,
58445+ GR_RBAC,
58446+ GR_RBAC_STR,
58447+ GR_STR_RBAC,
58448+ GR_RBAC_MODE2,
58449+ GR_RBAC_MODE3,
58450+ GR_FILENAME,
58451+ GR_SYSCTL_HIDDEN,
58452+ GR_NOARGS,
58453+ GR_ONE_INT,
58454+ GR_ONE_INT_TWO_STR,
58455+ GR_ONE_STR,
58456+ GR_STR_INT,
58457+ GR_TWO_STR_INT,
58458+ GR_TWO_INT,
58459+ GR_TWO_U64,
58460+ GR_THREE_INT,
58461+ GR_FIVE_INT_TWO_STR,
58462+ GR_TWO_STR,
58463+ GR_THREE_STR,
58464+ GR_FOUR_STR,
58465+ GR_STR_FILENAME,
58466+ GR_FILENAME_STR,
58467+ GR_FILENAME_TWO_INT,
58468+ GR_FILENAME_TWO_INT_STR,
58469+ GR_TEXTREL,
58470+ GR_PTRACE,
58471+ GR_RESOURCE,
58472+ GR_CAP,
58473+ GR_SIG,
58474+ GR_SIG2,
58475+ GR_CRASH1,
58476+ GR_CRASH2,
58477+ GR_PSACCT,
58478+ GR_RWXMAP
58479+};
58480+
58481+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58482+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58483+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58484+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58485+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58486+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58487+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58488+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58489+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58490+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58491+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58492+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58493+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58494+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58495+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58496+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58497+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58498+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58499+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58500+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58501+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58502+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58503+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58504+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58505+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58506+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58507+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58508+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58509+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58510+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58511+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58512+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58513+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58514+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58515+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58516+
58517+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58518+
58519+#endif
58520+
58521+#endif
58522diff -urNp linux-2.6.32.46/include/linux/grmsg.h linux-2.6.32.46/include/linux/grmsg.h
58523--- linux-2.6.32.46/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58524+++ linux-2.6.32.46/include/linux/grmsg.h 2011-08-25 17:28:11.000000000 -0400
58525@@ -0,0 +1,107 @@
58526+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58527+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58528+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58529+#define GR_STOPMOD_MSG "denied modification of module state by "
58530+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58531+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58532+#define GR_IOPERM_MSG "denied use of ioperm() by "
58533+#define GR_IOPL_MSG "denied use of iopl() by "
58534+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58535+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58536+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58537+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58538+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58539+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58540+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58541+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
58542+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
58543+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
58544+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
58545+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
58546+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
58547+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
58548+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58549+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
58550+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
58551+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
58552+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
58553+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
58554+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
58555+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
58556+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
58557+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58558+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58559+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
58560+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
58561+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
58562+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
58563+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
58564+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
58565+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
58566+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
58567+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
58568+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58569+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
58570+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
58571+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
58572+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
58573+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
58574+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58575+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
58576+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
58577+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
58578+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
58579+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
58580+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
58581+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
58582+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
58583+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
58584+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
58585+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
58586+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
58587+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
58588+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
58589+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
58590+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
58591+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
58592+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
58593+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
58594+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
58595+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58596+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
58597+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
58598+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58599+#define GR_NICE_CHROOT_MSG "denied priority change by "
58600+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
58601+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
58602+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
58603+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
58604+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
58605+#define GR_TIME_MSG "time set by "
58606+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
58607+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
58608+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
58609+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
58610+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58611+#define GR_BIND_MSG "denied bind() by "
58612+#define GR_CONNECT_MSG "denied connect() by "
58613+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
58614+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
58615+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58616+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
58617+#define GR_CAP_ACL_MSG "use of %s denied for "
58618+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58619+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
58620+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
58621+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
58622+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
58623+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
58624+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
58625+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
58626+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
58627+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
58628+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58629+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
58630+#define GR_VM86_MSG "denied use of vm86 by "
58631+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
58632+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
58633diff -urNp linux-2.6.32.46/include/linux/grsecurity.h linux-2.6.32.46/include/linux/grsecurity.h
58634--- linux-2.6.32.46/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
58635+++ linux-2.6.32.46/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
58636@@ -0,0 +1,217 @@
58637+#ifndef GR_SECURITY_H
58638+#define GR_SECURITY_H
58639+#include <linux/fs.h>
58640+#include <linux/fs_struct.h>
58641+#include <linux/binfmts.h>
58642+#include <linux/gracl.h>
58643+#include <linux/compat.h>
58644+
58645+/* notify of brain-dead configs */
58646+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58647+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
58648+#endif
58649+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
58650+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
58651+#endif
58652+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58653+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58654+#endif
58655+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
58656+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
58657+#endif
58658+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
58659+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
58660+#endif
58661+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
58662+#error "CONFIG_PAX enabled, but no PaX options are enabled."
58663+#endif
58664+
58665+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58666+void gr_handle_brute_check(void);
58667+void gr_handle_kernel_exploit(void);
58668+int gr_process_user_ban(void);
58669+
58670+char gr_roletype_to_char(void);
58671+
58672+int gr_acl_enable_at_secure(void);
58673+
58674+int gr_check_user_change(int real, int effective, int fs);
58675+int gr_check_group_change(int real, int effective, int fs);
58676+
58677+void gr_del_task_from_ip_table(struct task_struct *p);
58678+
58679+int gr_pid_is_chrooted(struct task_struct *p);
58680+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58681+int gr_handle_chroot_nice(void);
58682+int gr_handle_chroot_sysctl(const int op);
58683+int gr_handle_chroot_setpriority(struct task_struct *p,
58684+ const int niceval);
58685+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
58686+int gr_handle_chroot_chroot(const struct dentry *dentry,
58687+ const struct vfsmount *mnt);
58688+int gr_handle_chroot_caps(struct path *path);
58689+void gr_handle_chroot_chdir(struct path *path);
58690+int gr_handle_chroot_chmod(const struct dentry *dentry,
58691+ const struct vfsmount *mnt, const int mode);
58692+int gr_handle_chroot_mknod(const struct dentry *dentry,
58693+ const struct vfsmount *mnt, const int mode);
58694+int gr_handle_chroot_mount(const struct dentry *dentry,
58695+ const struct vfsmount *mnt,
58696+ const char *dev_name);
58697+int gr_handle_chroot_pivot(void);
58698+int gr_handle_chroot_unix(const pid_t pid);
58699+
58700+int gr_handle_rawio(const struct inode *inode);
58701+
58702+void gr_handle_ioperm(void);
58703+void gr_handle_iopl(void);
58704+
58705+int gr_tpe_allow(const struct file *file);
58706+
58707+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
58708+void gr_clear_chroot_entries(struct task_struct *task);
58709+
58710+void gr_log_forkfail(const int retval);
58711+void gr_log_timechange(void);
58712+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
58713+void gr_log_chdir(const struct dentry *dentry,
58714+ const struct vfsmount *mnt);
58715+void gr_log_chroot_exec(const struct dentry *dentry,
58716+ const struct vfsmount *mnt);
58717+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
58718+#ifdef CONFIG_COMPAT
58719+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
58720+#endif
58721+void gr_log_remount(const char *devname, const int retval);
58722+void gr_log_unmount(const char *devname, const int retval);
58723+void gr_log_mount(const char *from, const char *to, const int retval);
58724+void gr_log_textrel(struct vm_area_struct *vma);
58725+void gr_log_rwxmmap(struct file *file);
58726+void gr_log_rwxmprotect(struct file *file);
58727+
58728+int gr_handle_follow_link(const struct inode *parent,
58729+ const struct inode *inode,
58730+ const struct dentry *dentry,
58731+ const struct vfsmount *mnt);
58732+int gr_handle_fifo(const struct dentry *dentry,
58733+ const struct vfsmount *mnt,
58734+ const struct dentry *dir, const int flag,
58735+ const int acc_mode);
58736+int gr_handle_hardlink(const struct dentry *dentry,
58737+ const struct vfsmount *mnt,
58738+ struct inode *inode,
58739+ const int mode, const char *to);
58740+
58741+int gr_is_capable(const int cap);
58742+int gr_is_capable_nolog(const int cap);
58743+void gr_learn_resource(const struct task_struct *task, const int limit,
58744+ const unsigned long wanted, const int gt);
58745+void gr_copy_label(struct task_struct *tsk);
58746+void gr_handle_crash(struct task_struct *task, const int sig);
58747+int gr_handle_signal(const struct task_struct *p, const int sig);
58748+int gr_check_crash_uid(const uid_t uid);
58749+int gr_check_protected_task(const struct task_struct *task);
58750+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58751+int gr_acl_handle_mmap(const struct file *file,
58752+ const unsigned long prot);
58753+int gr_acl_handle_mprotect(const struct file *file,
58754+ const unsigned long prot);
58755+int gr_check_hidden_task(const struct task_struct *tsk);
58756+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
58757+ const struct vfsmount *mnt);
58758+__u32 gr_acl_handle_utime(const struct dentry *dentry,
58759+ const struct vfsmount *mnt);
58760+__u32 gr_acl_handle_access(const struct dentry *dentry,
58761+ const struct vfsmount *mnt, const int fmode);
58762+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
58763+ const struct vfsmount *mnt, mode_t mode);
58764+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
58765+ const struct vfsmount *mnt, mode_t mode);
58766+__u32 gr_acl_handle_chown(const struct dentry *dentry,
58767+ const struct vfsmount *mnt);
58768+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
58769+ const struct vfsmount *mnt);
58770+int gr_handle_ptrace(struct task_struct *task, const long request);
58771+int gr_handle_proc_ptrace(struct task_struct *task);
58772+__u32 gr_acl_handle_execve(const struct dentry *dentry,
58773+ const struct vfsmount *mnt);
58774+int gr_check_crash_exec(const struct file *filp);
58775+int gr_acl_is_enabled(void);
58776+void gr_set_kernel_label(struct task_struct *task);
58777+void gr_set_role_label(struct task_struct *task, const uid_t uid,
58778+ const gid_t gid);
58779+int gr_set_proc_label(const struct dentry *dentry,
58780+ const struct vfsmount *mnt,
58781+ const int unsafe_share);
58782+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
58783+ const struct vfsmount *mnt);
58784+__u32 gr_acl_handle_open(const struct dentry *dentry,
58785+ const struct vfsmount *mnt, const int fmode);
58786+__u32 gr_acl_handle_creat(const struct dentry *dentry,
58787+ const struct dentry *p_dentry,
58788+ const struct vfsmount *p_mnt, const int fmode,
58789+ const int imode);
58790+void gr_handle_create(const struct dentry *dentry,
58791+ const struct vfsmount *mnt);
58792+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
58793+ const struct dentry *parent_dentry,
58794+ const struct vfsmount *parent_mnt,
58795+ const int mode);
58796+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
58797+ const struct dentry *parent_dentry,
58798+ const struct vfsmount *parent_mnt);
58799+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
58800+ const struct vfsmount *mnt);
58801+void gr_handle_delete(const ino_t ino, const dev_t dev);
58802+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
58803+ const struct vfsmount *mnt);
58804+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
58805+ const struct dentry *parent_dentry,
58806+ const struct vfsmount *parent_mnt,
58807+ const char *from);
58808+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
58809+ const struct dentry *parent_dentry,
58810+ const struct vfsmount *parent_mnt,
58811+ const struct dentry *old_dentry,
58812+ const struct vfsmount *old_mnt, const char *to);
58813+int gr_acl_handle_rename(struct dentry *new_dentry,
58814+ struct dentry *parent_dentry,
58815+ const struct vfsmount *parent_mnt,
58816+ struct dentry *old_dentry,
58817+ struct inode *old_parent_inode,
58818+ struct vfsmount *old_mnt, const char *newname);
58819+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58820+ struct dentry *old_dentry,
58821+ struct dentry *new_dentry,
58822+ struct vfsmount *mnt, const __u8 replace);
58823+__u32 gr_check_link(const struct dentry *new_dentry,
58824+ const struct dentry *parent_dentry,
58825+ const struct vfsmount *parent_mnt,
58826+ const struct dentry *old_dentry,
58827+ const struct vfsmount *old_mnt);
58828+int gr_acl_handle_filldir(const struct file *file, const char *name,
58829+ const unsigned int namelen, const ino_t ino);
58830+
58831+__u32 gr_acl_handle_unix(const struct dentry *dentry,
58832+ const struct vfsmount *mnt);
58833+void gr_acl_handle_exit(void);
58834+void gr_acl_handle_psacct(struct task_struct *task, const long code);
58835+int gr_acl_handle_procpidmem(const struct task_struct *task);
58836+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
58837+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
58838+void gr_audit_ptrace(struct task_struct *task);
58839+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58840+
58841+#ifdef CONFIG_GRKERNSEC
58842+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
58843+void gr_handle_vm86(void);
58844+void gr_handle_mem_readwrite(u64 from, u64 to);
58845+
58846+extern int grsec_enable_dmesg;
58847+extern int grsec_disable_privio;
58848+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58849+extern int grsec_enable_chroot_findtask;
58850+#endif
58851+#endif
58852+
58853+#endif
58854diff -urNp linux-2.6.32.46/include/linux/hdpu_features.h linux-2.6.32.46/include/linux/hdpu_features.h
58855--- linux-2.6.32.46/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
58856+++ linux-2.6.32.46/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
58857@@ -3,7 +3,7 @@
58858 struct cpustate_t {
58859 spinlock_t lock;
58860 int excl;
58861- int open_count;
58862+ atomic_t open_count;
58863 unsigned char cached_val;
58864 int inited;
58865 unsigned long *set_addr;
58866diff -urNp linux-2.6.32.46/include/linux/highmem.h linux-2.6.32.46/include/linux/highmem.h
58867--- linux-2.6.32.46/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
58868+++ linux-2.6.32.46/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
58869@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
58870 kunmap_atomic(kaddr, KM_USER0);
58871 }
58872
58873+static inline void sanitize_highpage(struct page *page)
58874+{
58875+ void *kaddr;
58876+ unsigned long flags;
58877+
58878+ local_irq_save(flags);
58879+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
58880+ clear_page(kaddr);
58881+ kunmap_atomic(kaddr, KM_CLEARPAGE);
58882+ local_irq_restore(flags);
58883+}
58884+
58885 static inline void zero_user_segments(struct page *page,
58886 unsigned start1, unsigned end1,
58887 unsigned start2, unsigned end2)
58888diff -urNp linux-2.6.32.46/include/linux/i2c.h linux-2.6.32.46/include/linux/i2c.h
58889--- linux-2.6.32.46/include/linux/i2c.h 2011-03-27 14:31:47.000000000 -0400
58890+++ linux-2.6.32.46/include/linux/i2c.h 2011-08-23 21:22:38.000000000 -0400
58891@@ -325,6 +325,7 @@ struct i2c_algorithm {
58892 /* To determine what the adapter supports */
58893 u32 (*functionality) (struct i2c_adapter *);
58894 };
58895+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
58896
58897 /*
58898 * i2c_adapter is the structure used to identify a physical i2c bus along
58899diff -urNp linux-2.6.32.46/include/linux/i2o.h linux-2.6.32.46/include/linux/i2o.h
58900--- linux-2.6.32.46/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
58901+++ linux-2.6.32.46/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
58902@@ -564,7 +564,7 @@ struct i2o_controller {
58903 struct i2o_device *exec; /* Executive */
58904 #if BITS_PER_LONG == 64
58905 spinlock_t context_list_lock; /* lock for context_list */
58906- atomic_t context_list_counter; /* needed for unique contexts */
58907+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
58908 struct list_head context_list; /* list of context id's
58909 and pointers */
58910 #endif
58911diff -urNp linux-2.6.32.46/include/linux/init_task.h linux-2.6.32.46/include/linux/init_task.h
58912--- linux-2.6.32.46/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
58913+++ linux-2.6.32.46/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
58914@@ -83,6 +83,12 @@ extern struct group_info init_groups;
58915 #define INIT_IDS
58916 #endif
58917
58918+#ifdef CONFIG_X86
58919+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
58920+#else
58921+#define INIT_TASK_THREAD_INFO
58922+#endif
58923+
58924 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
58925 /*
58926 * Because of the reduced scope of CAP_SETPCAP when filesystem
58927@@ -156,6 +162,7 @@ extern struct cred init_cred;
58928 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
58929 .comm = "swapper", \
58930 .thread = INIT_THREAD, \
58931+ INIT_TASK_THREAD_INFO \
58932 .fs = &init_fs, \
58933 .files = &init_files, \
58934 .signal = &init_signals, \
58935diff -urNp linux-2.6.32.46/include/linux/intel-iommu.h linux-2.6.32.46/include/linux/intel-iommu.h
58936--- linux-2.6.32.46/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
58937+++ linux-2.6.32.46/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
58938@@ -296,7 +296,7 @@ struct iommu_flush {
58939 u8 fm, u64 type);
58940 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
58941 unsigned int size_order, u64 type);
58942-};
58943+} __no_const;
58944
58945 enum {
58946 SR_DMAR_FECTL_REG,
58947diff -urNp linux-2.6.32.46/include/linux/interrupt.h linux-2.6.32.46/include/linux/interrupt.h
58948--- linux-2.6.32.46/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
58949+++ linux-2.6.32.46/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
58950@@ -363,7 +363,7 @@ enum
58951 /* map softirq index to softirq name. update 'softirq_to_name' in
58952 * kernel/softirq.c when adding a new softirq.
58953 */
58954-extern char *softirq_to_name[NR_SOFTIRQS];
58955+extern const char * const softirq_to_name[NR_SOFTIRQS];
58956
58957 /* softirq mask and active fields moved to irq_cpustat_t in
58958 * asm/hardirq.h to get better cache usage. KAO
58959@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
58960
58961 struct softirq_action
58962 {
58963- void (*action)(struct softirq_action *);
58964+ void (*action)(void);
58965 };
58966
58967 asmlinkage void do_softirq(void);
58968 asmlinkage void __do_softirq(void);
58969-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
58970+extern void open_softirq(int nr, void (*action)(void));
58971 extern void softirq_init(void);
58972 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
58973 extern void raise_softirq_irqoff(unsigned int nr);
58974diff -urNp linux-2.6.32.46/include/linux/irq.h linux-2.6.32.46/include/linux/irq.h
58975--- linux-2.6.32.46/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
58976+++ linux-2.6.32.46/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
58977@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
58978 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
58979 bool boot)
58980 {
58981+#ifdef CONFIG_CPUMASK_OFFSTACK
58982 gfp_t gfp = GFP_ATOMIC;
58983
58984 if (boot)
58985 gfp = GFP_NOWAIT;
58986
58987-#ifdef CONFIG_CPUMASK_OFFSTACK
58988 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
58989 return false;
58990
58991diff -urNp linux-2.6.32.46/include/linux/kallsyms.h linux-2.6.32.46/include/linux/kallsyms.h
58992--- linux-2.6.32.46/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
58993+++ linux-2.6.32.46/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
58994@@ -15,7 +15,8 @@
58995
58996 struct module;
58997
58998-#ifdef CONFIG_KALLSYMS
58999+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59000+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59001 /* Lookup the address for a symbol. Returns 0 if not found. */
59002 unsigned long kallsyms_lookup_name(const char *name);
59003
59004@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59005 /* Stupid that this does nothing, but I didn't create this mess. */
59006 #define __print_symbol(fmt, addr)
59007 #endif /*CONFIG_KALLSYMS*/
59008+#else /* when included by kallsyms.c, vsnprintf.c, or
59009+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59010+extern void __print_symbol(const char *fmt, unsigned long address);
59011+extern int sprint_symbol(char *buffer, unsigned long address);
59012+const char *kallsyms_lookup(unsigned long addr,
59013+ unsigned long *symbolsize,
59014+ unsigned long *offset,
59015+ char **modname, char *namebuf);
59016+#endif
59017
59018 /* This macro allows us to keep printk typechecking */
59019 static void __check_printsym_format(const char *fmt, ...)
59020diff -urNp linux-2.6.32.46/include/linux/kgdb.h linux-2.6.32.46/include/linux/kgdb.h
59021--- linux-2.6.32.46/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59022+++ linux-2.6.32.46/include/linux/kgdb.h 2011-08-26 20:25:20.000000000 -0400
59023@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59024
59025 extern int kgdb_connected;
59026
59027-extern atomic_t kgdb_setting_breakpoint;
59028-extern atomic_t kgdb_cpu_doing_single_step;
59029+extern atomic_unchecked_t kgdb_setting_breakpoint;
59030+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59031
59032 extern struct task_struct *kgdb_usethread;
59033 extern struct task_struct *kgdb_contthread;
59034@@ -235,7 +235,7 @@ struct kgdb_arch {
59035 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
59036 void (*remove_all_hw_break)(void);
59037 void (*correct_hw_break)(void);
59038-};
59039+} __do_const;
59040
59041 /**
59042 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59043@@ -257,14 +257,14 @@ struct kgdb_io {
59044 int (*init) (void);
59045 void (*pre_exception) (void);
59046 void (*post_exception) (void);
59047-};
59048+} __do_const;
59049
59050-extern struct kgdb_arch arch_kgdb_ops;
59051+extern const struct kgdb_arch arch_kgdb_ops;
59052
59053 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59054
59055-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59056-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59057+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59058+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59059
59060 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59061 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59062diff -urNp linux-2.6.32.46/include/linux/kmod.h linux-2.6.32.46/include/linux/kmod.h
59063--- linux-2.6.32.46/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59064+++ linux-2.6.32.46/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59065@@ -31,6 +31,8 @@
59066 * usually useless though. */
59067 extern int __request_module(bool wait, const char *name, ...) \
59068 __attribute__((format(printf, 2, 3)));
59069+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59070+ __attribute__((format(printf, 3, 4)));
59071 #define request_module(mod...) __request_module(true, mod)
59072 #define request_module_nowait(mod...) __request_module(false, mod)
59073 #define try_then_request_module(x, mod...) \
59074diff -urNp linux-2.6.32.46/include/linux/kobject.h linux-2.6.32.46/include/linux/kobject.h
59075--- linux-2.6.32.46/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59076+++ linux-2.6.32.46/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59077@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59078
59079 struct kobj_type {
59080 void (*release)(struct kobject *kobj);
59081- struct sysfs_ops *sysfs_ops;
59082+ const struct sysfs_ops *sysfs_ops;
59083 struct attribute **default_attrs;
59084 };
59085
59086@@ -118,9 +118,9 @@ struct kobj_uevent_env {
59087 };
59088
59089 struct kset_uevent_ops {
59090- int (*filter)(struct kset *kset, struct kobject *kobj);
59091- const char *(*name)(struct kset *kset, struct kobject *kobj);
59092- int (*uevent)(struct kset *kset, struct kobject *kobj,
59093+ int (* const filter)(struct kset *kset, struct kobject *kobj);
59094+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
59095+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
59096 struct kobj_uevent_env *env);
59097 };
59098
59099@@ -132,7 +132,7 @@ struct kobj_attribute {
59100 const char *buf, size_t count);
59101 };
59102
59103-extern struct sysfs_ops kobj_sysfs_ops;
59104+extern const struct sysfs_ops kobj_sysfs_ops;
59105
59106 /**
59107 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59108@@ -155,14 +155,14 @@ struct kset {
59109 struct list_head list;
59110 spinlock_t list_lock;
59111 struct kobject kobj;
59112- struct kset_uevent_ops *uevent_ops;
59113+ const struct kset_uevent_ops *uevent_ops;
59114 };
59115
59116 extern void kset_init(struct kset *kset);
59117 extern int __must_check kset_register(struct kset *kset);
59118 extern void kset_unregister(struct kset *kset);
59119 extern struct kset * __must_check kset_create_and_add(const char *name,
59120- struct kset_uevent_ops *u,
59121+ const struct kset_uevent_ops *u,
59122 struct kobject *parent_kobj);
59123
59124 static inline struct kset *to_kset(struct kobject *kobj)
59125diff -urNp linux-2.6.32.46/include/linux/kvm_host.h linux-2.6.32.46/include/linux/kvm_host.h
59126--- linux-2.6.32.46/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59127+++ linux-2.6.32.46/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59128@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59129 void vcpu_load(struct kvm_vcpu *vcpu);
59130 void vcpu_put(struct kvm_vcpu *vcpu);
59131
59132-int kvm_init(void *opaque, unsigned int vcpu_size,
59133+int kvm_init(const void *opaque, unsigned int vcpu_size,
59134 struct module *module);
59135 void kvm_exit(void);
59136
59137@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59138 struct kvm_guest_debug *dbg);
59139 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59140
59141-int kvm_arch_init(void *opaque);
59142+int kvm_arch_init(const void *opaque);
59143 void kvm_arch_exit(void);
59144
59145 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59146diff -urNp linux-2.6.32.46/include/linux/libata.h linux-2.6.32.46/include/linux/libata.h
59147--- linux-2.6.32.46/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59148+++ linux-2.6.32.46/include/linux/libata.h 2011-08-26 20:19:09.000000000 -0400
59149@@ -525,11 +525,11 @@ struct ata_ioports {
59150
59151 struct ata_host {
59152 spinlock_t lock;
59153- struct device *dev;
59154+ struct device *dev;
59155 void __iomem * const *iomap;
59156 unsigned int n_ports;
59157 void *private_data;
59158- struct ata_port_operations *ops;
59159+ const struct ata_port_operations *ops;
59160 unsigned long flags;
59161 #ifdef CONFIG_ATA_ACPI
59162 acpi_handle acpi_handle;
59163@@ -710,7 +710,7 @@ struct ata_link {
59164
59165 struct ata_port {
59166 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59167- struct ata_port_operations *ops;
59168+ const struct ata_port_operations *ops;
59169 spinlock_t *lock;
59170 /* Flags owned by the EH context. Only EH should touch these once the
59171 port is active */
59172@@ -884,7 +884,7 @@ struct ata_port_operations {
59173 * fields must be pointers.
59174 */
59175 const struct ata_port_operations *inherits;
59176-};
59177+} __do_const;
59178
59179 struct ata_port_info {
59180 unsigned long flags;
59181@@ -892,7 +892,7 @@ struct ata_port_info {
59182 unsigned long pio_mask;
59183 unsigned long mwdma_mask;
59184 unsigned long udma_mask;
59185- struct ata_port_operations *port_ops;
59186+ const struct ata_port_operations *port_ops;
59187 void *private_data;
59188 };
59189
59190@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59191 extern const unsigned long sata_deb_timing_hotplug[];
59192 extern const unsigned long sata_deb_timing_long[];
59193
59194-extern struct ata_port_operations ata_dummy_port_ops;
59195+extern const struct ata_port_operations ata_dummy_port_ops;
59196 extern const struct ata_port_info ata_dummy_port_info;
59197
59198 static inline const unsigned long *
59199@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59200 struct scsi_host_template *sht);
59201 extern void ata_host_detach(struct ata_host *host);
59202 extern void ata_host_init(struct ata_host *, struct device *,
59203- unsigned long, struct ata_port_operations *);
59204+ unsigned long, const struct ata_port_operations *);
59205 extern int ata_scsi_detect(struct scsi_host_template *sht);
59206 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59207 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59208diff -urNp linux-2.6.32.46/include/linux/lockd/bind.h linux-2.6.32.46/include/linux/lockd/bind.h
59209--- linux-2.6.32.46/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59210+++ linux-2.6.32.46/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59211@@ -23,13 +23,13 @@ struct svc_rqst;
59212 * This is the set of functions for lockd->nfsd communication
59213 */
59214 struct nlmsvc_binding {
59215- __be32 (*fopen)(struct svc_rqst *,
59216+ __be32 (* const fopen)(struct svc_rqst *,
59217 struct nfs_fh *,
59218 struct file **);
59219- void (*fclose)(struct file *);
59220+ void (* const fclose)(struct file *);
59221 };
59222
59223-extern struct nlmsvc_binding * nlmsvc_ops;
59224+extern const struct nlmsvc_binding * nlmsvc_ops;
59225
59226 /*
59227 * Similar to nfs_client_initdata, but without the NFS-specific
59228diff -urNp linux-2.6.32.46/include/linux/mca.h linux-2.6.32.46/include/linux/mca.h
59229--- linux-2.6.32.46/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59230+++ linux-2.6.32.46/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59231@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59232 int region);
59233 void * (*mca_transform_memory)(struct mca_device *,
59234 void *memory);
59235-};
59236+} __no_const;
59237
59238 struct mca_bus {
59239 u64 default_dma_mask;
59240diff -urNp linux-2.6.32.46/include/linux/memory.h linux-2.6.32.46/include/linux/memory.h
59241--- linux-2.6.32.46/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59242+++ linux-2.6.32.46/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59243@@ -108,7 +108,7 @@ struct memory_accessor {
59244 size_t count);
59245 ssize_t (*write)(struct memory_accessor *, const char *buf,
59246 off_t offset, size_t count);
59247-};
59248+} __no_const;
59249
59250 /*
59251 * Kernel text modification mutex, used for code patching. Users of this lock
59252diff -urNp linux-2.6.32.46/include/linux/mm.h linux-2.6.32.46/include/linux/mm.h
59253--- linux-2.6.32.46/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59254+++ linux-2.6.32.46/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59255@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59256
59257 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59258 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59259+
59260+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59261+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59262+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59263+#else
59264 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59265+#endif
59266+
59267 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59268 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59269
59270@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59271 int set_page_dirty_lock(struct page *page);
59272 int clear_page_dirty_for_io(struct page *page);
59273
59274-/* Is the vma a continuation of the stack vma above it? */
59275-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59276-{
59277- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59278-}
59279-
59280 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59281 unsigned long old_addr, struct vm_area_struct *new_vma,
59282 unsigned long new_addr, unsigned long len);
59283@@ -890,6 +891,8 @@ struct shrinker {
59284 extern void register_shrinker(struct shrinker *);
59285 extern void unregister_shrinker(struct shrinker *);
59286
59287+pgprot_t vm_get_page_prot(unsigned long vm_flags);
59288+
59289 int vma_wants_writenotify(struct vm_area_struct *vma);
59290
59291 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59292@@ -1162,6 +1165,7 @@ out:
59293 }
59294
59295 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59296+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59297
59298 extern unsigned long do_brk(unsigned long, unsigned long);
59299
59300@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59301 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59302 struct vm_area_struct **pprev);
59303
59304+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59305+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59306+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59307+
59308 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59309 NULL if none. Assume start_addr < end_addr. */
59310 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59311@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59312 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59313 }
59314
59315-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59316 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59317 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59318 unsigned long pfn, unsigned long size, pgprot_t);
59319@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59320 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59321 extern int sysctl_memory_failure_early_kill;
59322 extern int sysctl_memory_failure_recovery;
59323-extern atomic_long_t mce_bad_pages;
59324+extern atomic_long_unchecked_t mce_bad_pages;
59325+
59326+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59327+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59328+#else
59329+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59330+#endif
59331
59332 #endif /* __KERNEL__ */
59333 #endif /* _LINUX_MM_H */
59334diff -urNp linux-2.6.32.46/include/linux/mm_types.h linux-2.6.32.46/include/linux/mm_types.h
59335--- linux-2.6.32.46/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59336+++ linux-2.6.32.46/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59337@@ -186,6 +186,8 @@ struct vm_area_struct {
59338 #ifdef CONFIG_NUMA
59339 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59340 #endif
59341+
59342+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59343 };
59344
59345 struct core_thread {
59346@@ -287,6 +289,24 @@ struct mm_struct {
59347 #ifdef CONFIG_MMU_NOTIFIER
59348 struct mmu_notifier_mm *mmu_notifier_mm;
59349 #endif
59350+
59351+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59352+ unsigned long pax_flags;
59353+#endif
59354+
59355+#ifdef CONFIG_PAX_DLRESOLVE
59356+ unsigned long call_dl_resolve;
59357+#endif
59358+
59359+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59360+ unsigned long call_syscall;
59361+#endif
59362+
59363+#ifdef CONFIG_PAX_ASLR
59364+ unsigned long delta_mmap; /* randomized offset */
59365+ unsigned long delta_stack; /* randomized offset */
59366+#endif
59367+
59368 };
59369
59370 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59371diff -urNp linux-2.6.32.46/include/linux/mmu_notifier.h linux-2.6.32.46/include/linux/mmu_notifier.h
59372--- linux-2.6.32.46/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59373+++ linux-2.6.32.46/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59374@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59375 */
59376 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59377 ({ \
59378- pte_t __pte; \
59379+ pte_t ___pte; \
59380 struct vm_area_struct *___vma = __vma; \
59381 unsigned long ___address = __address; \
59382- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59383+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59384 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59385- __pte; \
59386+ ___pte; \
59387 })
59388
59389 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59390diff -urNp linux-2.6.32.46/include/linux/mmzone.h linux-2.6.32.46/include/linux/mmzone.h
59391--- linux-2.6.32.46/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59392+++ linux-2.6.32.46/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59393@@ -350,7 +350,7 @@ struct zone {
59394 unsigned long flags; /* zone flags, see below */
59395
59396 /* Zone statistics */
59397- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59398+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59399
59400 /*
59401 * prev_priority holds the scanning priority for this zone. It is
59402diff -urNp linux-2.6.32.46/include/linux/mod_devicetable.h linux-2.6.32.46/include/linux/mod_devicetable.h
59403--- linux-2.6.32.46/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59404+++ linux-2.6.32.46/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59405@@ -12,7 +12,7 @@
59406 typedef unsigned long kernel_ulong_t;
59407 #endif
59408
59409-#define PCI_ANY_ID (~0)
59410+#define PCI_ANY_ID ((__u16)~0)
59411
59412 struct pci_device_id {
59413 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59414@@ -131,7 +131,7 @@ struct usb_device_id {
59415 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59416 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59417
59418-#define HID_ANY_ID (~0)
59419+#define HID_ANY_ID (~0U)
59420
59421 struct hid_device_id {
59422 __u16 bus;
59423diff -urNp linux-2.6.32.46/include/linux/module.h linux-2.6.32.46/include/linux/module.h
59424--- linux-2.6.32.46/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59425+++ linux-2.6.32.46/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59426@@ -16,6 +16,7 @@
59427 #include <linux/kobject.h>
59428 #include <linux/moduleparam.h>
59429 #include <linux/tracepoint.h>
59430+#include <linux/fs.h>
59431
59432 #include <asm/local.h>
59433 #include <asm/module.h>
59434@@ -287,16 +288,16 @@ struct module
59435 int (*init)(void);
59436
59437 /* If this is non-NULL, vfree after init() returns */
59438- void *module_init;
59439+ void *module_init_rx, *module_init_rw;
59440
59441 /* Here is the actual code + data, vfree'd on unload. */
59442- void *module_core;
59443+ void *module_core_rx, *module_core_rw;
59444
59445 /* Here are the sizes of the init and core sections */
59446- unsigned int init_size, core_size;
59447+ unsigned int init_size_rw, core_size_rw;
59448
59449 /* The size of the executable code in each section. */
59450- unsigned int init_text_size, core_text_size;
59451+ unsigned int init_size_rx, core_size_rx;
59452
59453 /* Arch-specific module values */
59454 struct mod_arch_specific arch;
59455@@ -345,6 +346,10 @@ struct module
59456 #ifdef CONFIG_EVENT_TRACING
59457 struct ftrace_event_call *trace_events;
59458 unsigned int num_trace_events;
59459+ struct file_operations trace_id;
59460+ struct file_operations trace_enable;
59461+ struct file_operations trace_format;
59462+ struct file_operations trace_filter;
59463 #endif
59464 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59465 unsigned long *ftrace_callsites;
59466@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59467 bool is_module_address(unsigned long addr);
59468 bool is_module_text_address(unsigned long addr);
59469
59470+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59471+{
59472+
59473+#ifdef CONFIG_PAX_KERNEXEC
59474+ if (ktla_ktva(addr) >= (unsigned long)start &&
59475+ ktla_ktva(addr) < (unsigned long)start + size)
59476+ return 1;
59477+#endif
59478+
59479+ return ((void *)addr >= start && (void *)addr < start + size);
59480+}
59481+
59482+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59483+{
59484+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59485+}
59486+
59487+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59488+{
59489+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59490+}
59491+
59492+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59493+{
59494+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59495+}
59496+
59497+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59498+{
59499+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59500+}
59501+
59502 static inline int within_module_core(unsigned long addr, struct module *mod)
59503 {
59504- return (unsigned long)mod->module_core <= addr &&
59505- addr < (unsigned long)mod->module_core + mod->core_size;
59506+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59507 }
59508
59509 static inline int within_module_init(unsigned long addr, struct module *mod)
59510 {
59511- return (unsigned long)mod->module_init <= addr &&
59512- addr < (unsigned long)mod->module_init + mod->init_size;
59513+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59514 }
59515
59516 /* Search for module by name: must hold module_mutex. */
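
With the core and init areas split into RX (code) and RW (data) halves, callers that only care about executable addresses can consult just the RX ranges, while the unchanged within_module_core()/within_module_init() names keep covering both halves. A hedged usage sketch built on the helpers added above (the wrapper function itself is hypothetical):

static int addr_is_module_text(unsigned long addr, struct module *mod)
{
        /* Code lives in the RX halves; a data lookup would use the
         * *_rw variants instead. */
        return within_module_core_rx(addr, mod) ||
               within_module_init_rx(addr, mod);
}
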
59517diff -urNp linux-2.6.32.46/include/linux/moduleloader.h linux-2.6.32.46/include/linux/moduleloader.h
59518--- linux-2.6.32.46/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59519+++ linux-2.6.32.46/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59520@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59521 sections. Returns NULL on failure. */
59522 void *module_alloc(unsigned long size);
59523
59524+#ifdef CONFIG_PAX_KERNEXEC
59525+void *module_alloc_exec(unsigned long size);
59526+#else
59527+#define module_alloc_exec(x) module_alloc(x)
59528+#endif
59529+
59530 /* Free memory returned from module_alloc. */
59531 void module_free(struct module *mod, void *module_region);
59532
59533+#ifdef CONFIG_PAX_KERNEXEC
59534+void module_free_exec(struct module *mod, void *module_region);
59535+#else
59536+#define module_free_exec(x, y) module_free((x), (y))
59537+#endif
59538+
59539 /* Apply the given relocation to the (simplified) ELF. Return -error
59540 or 0. */
59541 int apply_relocate(Elf_Shdr *sechdrs,
59542diff -urNp linux-2.6.32.46/include/linux/moduleparam.h linux-2.6.32.46/include/linux/moduleparam.h
59543--- linux-2.6.32.46/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
59544+++ linux-2.6.32.46/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
59545@@ -132,7 +132,7 @@ struct kparam_array
59546
59547 /* Actually copy string: maxlen param is usually sizeof(string). */
59548 #define module_param_string(name, string, len, perm) \
59549- static const struct kparam_string __param_string_##name \
59550+ static const struct kparam_string __param_string_##name __used \
59551 = { len, string }; \
59552 __module_param_call(MODULE_PARAM_PREFIX, name, \
59553 param_set_copystring, param_get_string, \
59554@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
59555
59556 /* Comma-separated array: *nump is set to number they actually specified. */
59557 #define module_param_array_named(name, array, type, nump, perm) \
59558- static const struct kparam_array __param_arr_##name \
59559+ static const struct kparam_array __param_arr_##name __used \
59560 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
59561 sizeof(array[0]), array }; \
59562 __module_param_call(MODULE_PARAM_PREFIX, name, \
59563diff -urNp linux-2.6.32.46/include/linux/mutex.h linux-2.6.32.46/include/linux/mutex.h
59564--- linux-2.6.32.46/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
59565+++ linux-2.6.32.46/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
59566@@ -51,7 +51,7 @@ struct mutex {
59567 spinlock_t wait_lock;
59568 struct list_head wait_list;
59569 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
59570- struct thread_info *owner;
59571+ struct task_struct *owner;
59572 #endif
59573 #ifdef CONFIG_DEBUG_MUTEXES
59574 const char *name;
59575diff -urNp linux-2.6.32.46/include/linux/namei.h linux-2.6.32.46/include/linux/namei.h
59576--- linux-2.6.32.46/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
59577+++ linux-2.6.32.46/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
59578@@ -22,7 +22,7 @@ struct nameidata {
59579 unsigned int flags;
59580 int last_type;
59581 unsigned depth;
59582- char *saved_names[MAX_NESTED_LINKS + 1];
59583+ const char *saved_names[MAX_NESTED_LINKS + 1];
59584
59585 /* Intent data */
59586 union {
59587@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
59588 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
59589 extern void unlock_rename(struct dentry *, struct dentry *);
59590
59591-static inline void nd_set_link(struct nameidata *nd, char *path)
59592+static inline void nd_set_link(struct nameidata *nd, const char *path)
59593 {
59594 nd->saved_names[nd->depth] = path;
59595 }
59596
59597-static inline char *nd_get_link(struct nameidata *nd)
59598+static inline const char *nd_get_link(const struct nameidata *nd)
59599 {
59600 return nd->saved_names[nd->depth];
59601 }
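
Making nd_set_link() take a const pointer lets filesystems hand back read-only strings (string literals, data in const mappings) without casts. A hedged sketch of a trivial ->follow_link implementation using the constified helper (the function and target string are illustrative only):

static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        /* A string literal is fine now that saved_names[] holds const char *. */
        nd_set_link(nd, "subdir/target");
        return NULL;
}
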
59602diff -urNp linux-2.6.32.46/include/linux/netdevice.h linux-2.6.32.46/include/linux/netdevice.h
59603--- linux-2.6.32.46/include/linux/netdevice.h 2011-08-09 18:35:30.000000000 -0400
59604+++ linux-2.6.32.46/include/linux/netdevice.h 2011-08-23 21:22:38.000000000 -0400
59605@@ -637,6 +637,7 @@ struct net_device_ops {
59606 u16 xid);
59607 #endif
59608 };
59609+typedef struct net_device_ops __no_const net_device_ops_no_const;
59610
59611 /*
59612 * The DEVICE structure.
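
net_device_ops is one of several ops structures in this patch (rfkill_ops and seq_operations get the same treatment below) that are meant to be instantiated const; the __no_const typedef keeps a writable spelling of the same type for the few drivers that fill their callback table in at runtime. A hedged sketch of that intended split (the driver bits are hypothetical):

static int example_open(struct net_device *dev);        /* hypothetical callback */

struct example_priv {
        net_device_ops_no_const ops;    /* built up at runtime, so not const */
};

static void example_wire_up(struct example_priv *p)
{
        p->ops.ndo_open = example_open; /* would not compile against a const table */
}
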
59613diff -urNp linux-2.6.32.46/include/linux/netfilter/xt_gradm.h linux-2.6.32.46/include/linux/netfilter/xt_gradm.h
59614--- linux-2.6.32.46/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
59615+++ linux-2.6.32.46/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
59616@@ -0,0 +1,9 @@
59617+#ifndef _LINUX_NETFILTER_XT_GRADM_H
59618+#define _LINUX_NETFILTER_XT_GRADM_H 1
59619+
59620+struct xt_gradm_mtinfo {
59621+ __u16 flags;
59622+ __u16 invflags;
59623+};
59624+
59625+#endif
59626diff -urNp linux-2.6.32.46/include/linux/nodemask.h linux-2.6.32.46/include/linux/nodemask.h
59627--- linux-2.6.32.46/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
59628+++ linux-2.6.32.46/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
59629@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
59630
59631 #define any_online_node(mask) \
59632 ({ \
59633- int node; \
59634- for_each_node_mask(node, (mask)) \
59635- if (node_online(node)) \
59636+ int __node; \
59637+ for_each_node_mask(__node, (mask)) \
59638+ if (node_online(__node)) \
59639 break; \
59640- node; \
59641+ __node; \
59642 })
59643
59644 #define num_online_nodes() num_node_state(N_ONLINE)
59645diff -urNp linux-2.6.32.46/include/linux/oprofile.h linux-2.6.32.46/include/linux/oprofile.h
59646--- linux-2.6.32.46/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
59647+++ linux-2.6.32.46/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
59648@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
59649 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
59650 char const * name, ulong * val);
59651
59652-/** Create a file for read-only access to an atomic_t. */
59653+/** Create a file for read-only access to an atomic_unchecked_t. */
59654 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
59655- char const * name, atomic_t * val);
59656+ char const * name, atomic_unchecked_t * val);
59657
59658 /** create a directory */
59659 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
59660diff -urNp linux-2.6.32.46/include/linux/pagemap.h linux-2.6.32.46/include/linux/pagemap.h
59661--- linux-2.6.32.46/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
59662+++ linux-2.6.32.46/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
59663@@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
59664 if (((unsigned long)uaddr & PAGE_MASK) !=
59665 ((unsigned long)end & PAGE_MASK))
59666 ret = __get_user(c, end);
59667+ (void)c;
59668 }
59669 return ret;
59670 }
59671diff -urNp linux-2.6.32.46/include/linux/perf_event.h linux-2.6.32.46/include/linux/perf_event.h
59672--- linux-2.6.32.46/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
59673+++ linux-2.6.32.46/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
59674@@ -476,7 +476,7 @@ struct hw_perf_event {
59675 struct hrtimer hrtimer;
59676 };
59677 };
59678- atomic64_t prev_count;
59679+ atomic64_unchecked_t prev_count;
59680 u64 sample_period;
59681 u64 last_period;
59682 atomic64_t period_left;
59683@@ -557,7 +557,7 @@ struct perf_event {
59684 const struct pmu *pmu;
59685
59686 enum perf_event_active_state state;
59687- atomic64_t count;
59688+ atomic64_unchecked_t count;
59689
59690 /*
59691 * These are the total time in nanoseconds that the event
59692@@ -595,8 +595,8 @@ struct perf_event {
59693 * These accumulate total time (in nanoseconds) that children
59694 * events have been enabled and running, respectively.
59695 */
59696- atomic64_t child_total_time_enabled;
59697- atomic64_t child_total_time_running;
59698+ atomic64_unchecked_t child_total_time_enabled;
59699+ atomic64_unchecked_t child_total_time_running;
59700
59701 /*
59702 * Protect attach/detach and child_list:
59703diff -urNp linux-2.6.32.46/include/linux/pipe_fs_i.h linux-2.6.32.46/include/linux/pipe_fs_i.h
59704--- linux-2.6.32.46/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
59705+++ linux-2.6.32.46/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
59706@@ -46,9 +46,9 @@ struct pipe_inode_info {
59707 wait_queue_head_t wait;
59708 unsigned int nrbufs, curbuf;
59709 struct page *tmp_page;
59710- unsigned int readers;
59711- unsigned int writers;
59712- unsigned int waiting_writers;
59713+ atomic_t readers;
59714+ atomic_t writers;
59715+ atomic_t waiting_writers;
59716 unsigned int r_counter;
59717 unsigned int w_counter;
59718 struct fasync_struct *fasync_readers;
59719diff -urNp linux-2.6.32.46/include/linux/poison.h linux-2.6.32.46/include/linux/poison.h
59720--- linux-2.6.32.46/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
59721+++ linux-2.6.32.46/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
59722@@ -19,8 +19,8 @@
59723 * under normal circumstances, used to verify that nobody uses
59724 * non-initialized list entries.
59725 */
59726-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
59727-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
59728+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
59729+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
59730
59731 /********** include/linux/timer.h **********/
59732 /*
59733diff -urNp linux-2.6.32.46/include/linux/posix-timers.h linux-2.6.32.46/include/linux/posix-timers.h
59734--- linux-2.6.32.46/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
59735+++ linux-2.6.32.46/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
59736@@ -67,7 +67,7 @@ struct k_itimer {
59737 };
59738
59739 struct k_clock {
59740- int res; /* in nanoseconds */
59741+ const int res; /* in nanoseconds */
59742 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
59743 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
59744 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
59745diff -urNp linux-2.6.32.46/include/linux/preempt.h linux-2.6.32.46/include/linux/preempt.h
59746--- linux-2.6.32.46/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
59747+++ linux-2.6.32.46/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
59748@@ -110,7 +110,7 @@ struct preempt_ops {
59749 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
59750 void (*sched_out)(struct preempt_notifier *notifier,
59751 struct task_struct *next);
59752-};
59753+} __no_const;
59754
59755 /**
59756 * preempt_notifier - key for installing preemption notifiers
59757diff -urNp linux-2.6.32.46/include/linux/proc_fs.h linux-2.6.32.46/include/linux/proc_fs.h
59758--- linux-2.6.32.46/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
59759+++ linux-2.6.32.46/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
59760@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
59761 return proc_create_data(name, mode, parent, proc_fops, NULL);
59762 }
59763
59764+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
59765+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
59766+{
59767+#ifdef CONFIG_GRKERNSEC_PROC_USER
59768+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
59769+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59770+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
59771+#else
59772+ return proc_create_data(name, mode, parent, proc_fops, NULL);
59773+#endif
59774+}
59775+
59776+
59777 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
59778 mode_t mode, struct proc_dir_entry *base,
59779 read_proc_t *read_proc, void * data)
59780@@ -256,7 +269,7 @@ union proc_op {
59781 int (*proc_show)(struct seq_file *m,
59782 struct pid_namespace *ns, struct pid *pid,
59783 struct task_struct *task);
59784-};
59785+} __no_const;
59786
59787 struct ctl_table_header;
59788 struct ctl_table;
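
proc_create_grsec() deliberately overrides the requested mode when the stricter /proc visibility options are configured: S_IRUSR under GRKERNSEC_PROC_USER, S_IRUSR | S_IRGRP under GRKERNSEC_PROC_USERGROUP, and only otherwise the mode the caller asked for. A hedged sketch of a caller (the entry name and fops are illustrative placeholders, not from the patch):

static const struct file_operations example_fops;      /* illustrative placeholder */

static int __init example_proc_init(void)
{
        /* World-readable only when neither GRKERNSEC_PROC restriction is set. */
        if (!proc_create_grsec("example_stats", S_IRUGO, NULL, &example_fops))
                return -ENOMEM;
        return 0;
}
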
59789diff -urNp linux-2.6.32.46/include/linux/ptrace.h linux-2.6.32.46/include/linux/ptrace.h
59790--- linux-2.6.32.46/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
59791+++ linux-2.6.32.46/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
59792@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
59793 extern void exit_ptrace(struct task_struct *tracer);
59794 #define PTRACE_MODE_READ 1
59795 #define PTRACE_MODE_ATTACH 2
59796-/* Returns 0 on success, -errno on denial. */
59797-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
59798 /* Returns true on success, false on denial. */
59799 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
59800+/* Returns true on success, false on denial. */
59801+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
59802
59803 static inline int ptrace_reparented(struct task_struct *child)
59804 {
59805diff -urNp linux-2.6.32.46/include/linux/random.h linux-2.6.32.46/include/linux/random.h
59806--- linux-2.6.32.46/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
59807+++ linux-2.6.32.46/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
59808@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
59809 u32 random32(void);
59810 void srandom32(u32 seed);
59811
59812+static inline unsigned long pax_get_random_long(void)
59813+{
59814+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
59815+}
59816+
59817 #endif /* __KERNEL___ */
59818
59819 #endif /* _LINUX_RANDOM_H */
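
pax_get_random_long() stitches two 32-bit draws into one word-sized value; on 32-bit targets sizeof(long) > 4 is false and the high half is simply dropped. The same widening idea as a standalone userspace sketch (rand() is only a stand-in for illustration, not a statement about entropy quality):

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>

static unsigned long wide_random(void)
{
        unsigned long r = (uint32_t)rand();

#if ULONG_MAX > 0xffffffffUL
        /* Only a 64-bit long has room for a second 32-bit draw. */
        r |= (unsigned long)(uint32_t)rand() << 32;
#endif
        return r;
}
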
59820diff -urNp linux-2.6.32.46/include/linux/reboot.h linux-2.6.32.46/include/linux/reboot.h
59821--- linux-2.6.32.46/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
59822+++ linux-2.6.32.46/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
59823@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
59824 * Architecture-specific implementations of sys_reboot commands.
59825 */
59826
59827-extern void machine_restart(char *cmd);
59828-extern void machine_halt(void);
59829-extern void machine_power_off(void);
59830+extern void machine_restart(char *cmd) __noreturn;
59831+extern void machine_halt(void) __noreturn;
59832+extern void machine_power_off(void) __noreturn;
59833
59834 extern void machine_shutdown(void);
59835 struct pt_regs;
59836@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
59837 */
59838
59839 extern void kernel_restart_prepare(char *cmd);
59840-extern void kernel_restart(char *cmd);
59841-extern void kernel_halt(void);
59842-extern void kernel_power_off(void);
59843+extern void kernel_restart(char *cmd) __noreturn;
59844+extern void kernel_halt(void) __noreturn;
59845+extern void kernel_power_off(void) __noreturn;
59846
59847 void ctrl_alt_del(void);
59848
59849@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
59850 * Emergency restart, callable from an interrupt handler.
59851 */
59852
59853-extern void emergency_restart(void);
59854+extern void emergency_restart(void) __noreturn;
59855 #include <asm/emergency-restart.h>
59856
59857 #endif
59858diff -urNp linux-2.6.32.46/include/linux/reiserfs_fs.h linux-2.6.32.46/include/linux/reiserfs_fs.h
59859--- linux-2.6.32.46/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
59860+++ linux-2.6.32.46/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
59861@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
59862 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
59863
59864 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
59865-#define get_generation(s) atomic_read (&fs_generation(s))
59866+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
59867 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
59868 #define __fs_changed(gen,s) (gen != get_generation (s))
59869 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
59870@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
59871 */
59872
59873 struct item_operations {
59874- int (*bytes_number) (struct item_head * ih, int block_size);
59875- void (*decrement_key) (struct cpu_key *);
59876- int (*is_left_mergeable) (struct reiserfs_key * ih,
59877+ int (* const bytes_number) (struct item_head * ih, int block_size);
59878+ void (* const decrement_key) (struct cpu_key *);
59879+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
59880 unsigned long bsize);
59881- void (*print_item) (struct item_head *, char *item);
59882- void (*check_item) (struct item_head *, char *item);
59883+ void (* const print_item) (struct item_head *, char *item);
59884+ void (* const check_item) (struct item_head *, char *item);
59885
59886- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59887+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
59888 int is_affected, int insert_size);
59889- int (*check_left) (struct virtual_item * vi, int free,
59890+ int (* const check_left) (struct virtual_item * vi, int free,
59891 int start_skip, int end_skip);
59892- int (*check_right) (struct virtual_item * vi, int free);
59893- int (*part_size) (struct virtual_item * vi, int from, int to);
59894- int (*unit_num) (struct virtual_item * vi);
59895- void (*print_vi) (struct virtual_item * vi);
59896+ int (* const check_right) (struct virtual_item * vi, int free);
59897+ int (* const part_size) (struct virtual_item * vi, int from, int to);
59898+ int (* const unit_num) (struct virtual_item * vi);
59899+ void (* const print_vi) (struct virtual_item * vi);
59900 };
59901
59902-extern struct item_operations *item_ops[TYPE_ANY + 1];
59903+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
59904
59905 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
59906 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
59907diff -urNp linux-2.6.32.46/include/linux/reiserfs_fs_sb.h linux-2.6.32.46/include/linux/reiserfs_fs_sb.h
59908--- linux-2.6.32.46/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
59909+++ linux-2.6.32.46/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
59910@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
59911 /* Comment? -Hans */
59912 wait_queue_head_t s_wait;
59913 /* To be obsoleted soon by per buffer seals.. -Hans */
59914- atomic_t s_generation_counter; // increased by one every time the
59915+ atomic_unchecked_t s_generation_counter; // increased by one every time the
59916 // tree gets re-balanced
59917 unsigned long s_properties; /* File system properties. Currently holds
59918 on-disk FS format */
59919diff -urNp linux-2.6.32.46/include/linux/relay.h linux-2.6.32.46/include/linux/relay.h
59920--- linux-2.6.32.46/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
59921+++ linux-2.6.32.46/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
59922@@ -159,7 +159,7 @@ struct rchan_callbacks
59923 * The callback should return 0 if successful, negative if not.
59924 */
59925 int (*remove_buf_file)(struct dentry *dentry);
59926-};
59927+} __no_const;
59928
59929 /*
59930 * CONFIG_RELAY kernel API, kernel/relay.c
59931diff -urNp linux-2.6.32.46/include/linux/rfkill.h linux-2.6.32.46/include/linux/rfkill.h
59932--- linux-2.6.32.46/include/linux/rfkill.h 2011-03-27 14:31:47.000000000 -0400
59933+++ linux-2.6.32.46/include/linux/rfkill.h 2011-08-23 21:22:38.000000000 -0400
59934@@ -144,6 +144,7 @@ struct rfkill_ops {
59935 void (*query)(struct rfkill *rfkill, void *data);
59936 int (*set_block)(void *data, bool blocked);
59937 };
59938+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
59939
59940 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
59941 /**
59942diff -urNp linux-2.6.32.46/include/linux/sched.h linux-2.6.32.46/include/linux/sched.h
59943--- linux-2.6.32.46/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
59944+++ linux-2.6.32.46/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
59945@@ -101,6 +101,7 @@ struct bio;
59946 struct fs_struct;
59947 struct bts_context;
59948 struct perf_event_context;
59949+struct linux_binprm;
59950
59951 /*
59952 * List of flags we want to share for kernel threads,
59953@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
59954 extern signed long schedule_timeout_uninterruptible(signed long timeout);
59955 asmlinkage void __schedule(void);
59956 asmlinkage void schedule(void);
59957-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
59958+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
59959
59960 struct nsproxy;
59961 struct user_namespace;
59962@@ -371,9 +372,12 @@ struct user_namespace;
59963 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
59964
59965 extern int sysctl_max_map_count;
59966+extern unsigned long sysctl_heap_stack_gap;
59967
59968 #include <linux/aio.h>
59969
59970+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
59971+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
59972 extern unsigned long
59973 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
59974 unsigned long, unsigned long);
59975@@ -666,6 +670,16 @@ struct signal_struct {
59976 struct tty_audit_buf *tty_audit_buf;
59977 #endif
59978
59979+#ifdef CONFIG_GRKERNSEC
59980+ u32 curr_ip;
59981+ u32 saved_ip;
59982+ u32 gr_saddr;
59983+ u32 gr_daddr;
59984+ u16 gr_sport;
59985+ u16 gr_dport;
59986+ u8 used_accept:1;
59987+#endif
59988+
59989 int oom_adj; /* OOM kill score adjustment (bit shift) */
59990 };
59991
59992@@ -723,6 +737,11 @@ struct user_struct {
59993 struct key *session_keyring; /* UID's default session keyring */
59994 #endif
59995
59996+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59997+ unsigned int banned;
59998+ unsigned long ban_expires;
59999+#endif
60000+
60001 /* Hash table maintenance information */
60002 struct hlist_node uidhash_node;
60003 uid_t uid;
60004@@ -1328,8 +1347,8 @@ struct task_struct {
60005 struct list_head thread_group;
60006
60007 struct completion *vfork_done; /* for vfork() */
60008- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60009- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60010+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60011+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60012
60013 cputime_t utime, stime, utimescaled, stimescaled;
60014 cputime_t gtime;
60015@@ -1343,16 +1362,6 @@ struct task_struct {
60016 struct task_cputime cputime_expires;
60017 struct list_head cpu_timers[3];
60018
60019-/* process credentials */
60020- const struct cred *real_cred; /* objective and real subjective task
60021- * credentials (COW) */
60022- const struct cred *cred; /* effective (overridable) subjective task
60023- * credentials (COW) */
60024- struct mutex cred_guard_mutex; /* guard against foreign influences on
60025- * credential calculations
60026- * (notably. ptrace) */
60027- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60028-
60029 char comm[TASK_COMM_LEN]; /* executable name excluding path
60030 - access with [gs]et_task_comm (which lock
60031 it with task_lock())
60032@@ -1369,6 +1378,10 @@ struct task_struct {
60033 #endif
60034 /* CPU-specific state of this task */
60035 struct thread_struct thread;
60036+/* thread_info moved to task_struct */
60037+#ifdef CONFIG_X86
60038+ struct thread_info tinfo;
60039+#endif
60040 /* filesystem information */
60041 struct fs_struct *fs;
60042 /* open file information */
60043@@ -1436,6 +1449,15 @@ struct task_struct {
60044 int hardirq_context;
60045 int softirq_context;
60046 #endif
60047+
60048+/* process credentials */
60049+ const struct cred *real_cred; /* objective and real subjective task
60050+ * credentials (COW) */
60051+ struct mutex cred_guard_mutex; /* guard against foreign influences on
60052+ * credential calculations
60053+ * (notably. ptrace) */
60054+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60055+
60056 #ifdef CONFIG_LOCKDEP
60057 # define MAX_LOCK_DEPTH 48UL
60058 u64 curr_chain_key;
60059@@ -1456,6 +1478,9 @@ struct task_struct {
60060
60061 struct backing_dev_info *backing_dev_info;
60062
60063+ const struct cred *cred; /* effective (overridable) subjective task
60064+ * credentials (COW) */
60065+
60066 struct io_context *io_context;
60067
60068 unsigned long ptrace_message;
60069@@ -1519,6 +1544,21 @@ struct task_struct {
60070 unsigned long default_timer_slack_ns;
60071
60072 struct list_head *scm_work_list;
60073+
60074+#ifdef CONFIG_GRKERNSEC
60075+ /* grsecurity */
60076+ struct dentry *gr_chroot_dentry;
60077+ struct acl_subject_label *acl;
60078+ struct acl_role_label *role;
60079+ struct file *exec_file;
60080+ u16 acl_role_id;
60081+ /* is this the task that authenticated to the special role */
60082+ u8 acl_sp_role;
60083+ u8 is_writable;
60084+ u8 brute;
60085+ u8 gr_is_chrooted;
60086+#endif
60087+
60088 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60089 /* Index of current stored adress in ret_stack */
60090 int curr_ret_stack;
60091@@ -1542,6 +1582,57 @@ struct task_struct {
60092 #endif /* CONFIG_TRACING */
60093 };
60094
60095+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60096+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60097+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60098+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60099+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60100+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60101+
60102+#ifdef CONFIG_PAX_SOFTMODE
60103+extern int pax_softmode;
60104+#endif
60105+
60106+extern int pax_check_flags(unsigned long *);
60107+
60108+/* if tsk != current then task_lock must be held on it */
60109+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60110+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60111+{
60112+ if (likely(tsk->mm))
60113+ return tsk->mm->pax_flags;
60114+ else
60115+ return 0UL;
60116+}
60117+
60118+/* if tsk != current then task_lock must be held on it */
60119+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60120+{
60121+ if (likely(tsk->mm)) {
60122+ tsk->mm->pax_flags = flags;
60123+ return 0;
60124+ }
60125+ return -EINVAL;
60126+}
60127+#endif
60128+
60129+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60130+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60131+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60132+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60133+#endif
60134+
60135+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60136+extern void pax_report_insns(void *pc, void *sp);
60137+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60138+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60139+
60140+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60141+extern void pax_track_stack(void);
60142+#else
60143+static inline void pax_track_stack(void) {}
60144+#endif
60145+
60146 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60147 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60148
60149@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60150 #define PF_DUMPCORE 0x00000200 /* dumped core */
60151 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60152 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60153-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60154+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60155 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60156 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60157 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60158@@ -1978,7 +2069,9 @@ void yield(void);
60159 extern struct exec_domain default_exec_domain;
60160
60161 union thread_union {
60162+#ifndef CONFIG_X86
60163 struct thread_info thread_info;
60164+#endif
60165 unsigned long stack[THREAD_SIZE/sizeof(long)];
60166 };
60167
60168@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60169 */
60170
60171 extern struct task_struct *find_task_by_vpid(pid_t nr);
60172+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60173 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60174 struct pid_namespace *ns);
60175
60176@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60177 extern void exit_itimers(struct signal_struct *);
60178 extern void flush_itimer_signals(void);
60179
60180-extern NORET_TYPE void do_group_exit(int);
60181+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60182
60183 extern void daemonize(const char *, ...);
60184 extern int allow_signal(int);
60185@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60186
60187 #endif
60188
60189-static inline int object_is_on_stack(void *obj)
60190+static inline int object_starts_on_stack(void *obj)
60191 {
60192- void *stack = task_stack_page(current);
60193+ const void *stack = task_stack_page(current);
60194
60195 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60196 }
60197
60198+#ifdef CONFIG_PAX_USERCOPY
60199+extern int object_is_on_stack(const void *obj, unsigned long len);
60200+#endif
60201+
60202 extern void thread_info_cache_init(void);
60203
60204 #ifdef CONFIG_DEBUG_STACK_USAGE
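
The MF_PAX_* bits live in mm->pax_flags, and the comments above pax_get_flags()/pax_set_flags() require task_lock when the target is not current. A hedged sketch of a caller honouring that rule (the function itself is hypothetical):

static long example_enable_mprotect(struct task_struct *tsk)
{
        long ret;

        task_lock(tsk);         /* required when tsk != current */
        ret = pax_set_flags(tsk, pax_get_flags(tsk) | MF_PAX_MPROTECT);
        task_unlock(tsk);

        return ret;             /* -EINVAL when tsk has no mm (kernel thread) */
}
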
60205diff -urNp linux-2.6.32.46/include/linux/screen_info.h linux-2.6.32.46/include/linux/screen_info.h
60206--- linux-2.6.32.46/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60207+++ linux-2.6.32.46/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60208@@ -42,7 +42,8 @@ struct screen_info {
60209 __u16 pages; /* 0x32 */
60210 __u16 vesa_attributes; /* 0x34 */
60211 __u32 capabilities; /* 0x36 */
60212- __u8 _reserved[6]; /* 0x3a */
60213+ __u16 vesapm_size; /* 0x3a */
60214+ __u8 _reserved[4]; /* 0x3c */
60215 } __attribute__((packed));
60216
60217 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60218diff -urNp linux-2.6.32.46/include/linux/security.h linux-2.6.32.46/include/linux/security.h
60219--- linux-2.6.32.46/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60220+++ linux-2.6.32.46/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60221@@ -34,6 +34,7 @@
60222 #include <linux/key.h>
60223 #include <linux/xfrm.h>
60224 #include <linux/gfp.h>
60225+#include <linux/grsecurity.h>
60226 #include <net/flow.h>
60227
60228 /* Maximum number of letters for an LSM name string */
60229diff -urNp linux-2.6.32.46/include/linux/seq_file.h linux-2.6.32.46/include/linux/seq_file.h
60230--- linux-2.6.32.46/include/linux/seq_file.h 2011-03-27 14:31:47.000000000 -0400
60231+++ linux-2.6.32.46/include/linux/seq_file.h 2011-08-23 21:22:38.000000000 -0400
60232@@ -32,6 +32,7 @@ struct seq_operations {
60233 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60234 int (*show) (struct seq_file *m, void *v);
60235 };
60236+typedef struct seq_operations __no_const seq_operations_no_const;
60237
60238 #define SEQ_SKIP 1
60239
60240diff -urNp linux-2.6.32.46/include/linux/shm.h linux-2.6.32.46/include/linux/shm.h
60241--- linux-2.6.32.46/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60242+++ linux-2.6.32.46/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60243@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60244 pid_t shm_cprid;
60245 pid_t shm_lprid;
60246 struct user_struct *mlock_user;
60247+#ifdef CONFIG_GRKERNSEC
60248+ time_t shm_createtime;
60249+ pid_t shm_lapid;
60250+#endif
60251 };
60252
60253 /* shm_mode upper byte flags */
60254diff -urNp linux-2.6.32.46/include/linux/skbuff.h linux-2.6.32.46/include/linux/skbuff.h
60255--- linux-2.6.32.46/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60256+++ linux-2.6.32.46/include/linux/skbuff.h 2011-08-21 15:27:56.000000000 -0400
60257@@ -14,6 +14,7 @@
60258 #ifndef _LINUX_SKBUFF_H
60259 #define _LINUX_SKBUFF_H
60260
60261+#include <linux/const.h>
60262 #include <linux/kernel.h>
60263 #include <linux/kmemcheck.h>
60264 #include <linux/compiler.h>
60265@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
60266 */
60267 static inline int skb_queue_empty(const struct sk_buff_head *list)
60268 {
60269- return list->next == (struct sk_buff *)list;
60270+ return list->next == (const struct sk_buff *)list;
60271 }
60272
60273 /**
60274@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
60275 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60276 const struct sk_buff *skb)
60277 {
60278- return (skb->next == (struct sk_buff *) list);
60279+ return (skb->next == (const struct sk_buff *) list);
60280 }
60281
60282 /**
60283@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
60284 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60285 const struct sk_buff *skb)
60286 {
60287- return (skb->prev == (struct sk_buff *) list);
60288+ return (skb->prev == (const struct sk_buff *) list);
60289 }
60290
60291 /**
60292@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
60293 * headroom, you should not reduce this.
60294 */
60295 #ifndef NET_SKB_PAD
60296-#define NET_SKB_PAD 32
60297+#define NET_SKB_PAD (_AC(32,UL))
60298 #endif
60299
60300 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60301diff -urNp linux-2.6.32.46/include/linux/slab_def.h linux-2.6.32.46/include/linux/slab_def.h
60302--- linux-2.6.32.46/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60303+++ linux-2.6.32.46/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60304@@ -69,10 +69,10 @@ struct kmem_cache {
60305 unsigned long node_allocs;
60306 unsigned long node_frees;
60307 unsigned long node_overflow;
60308- atomic_t allochit;
60309- atomic_t allocmiss;
60310- atomic_t freehit;
60311- atomic_t freemiss;
60312+ atomic_unchecked_t allochit;
60313+ atomic_unchecked_t allocmiss;
60314+ atomic_unchecked_t freehit;
60315+ atomic_unchecked_t freemiss;
60316
60317 /*
60318 * If debugging is enabled, then the allocator can add additional
60319diff -urNp linux-2.6.32.46/include/linux/slab.h linux-2.6.32.46/include/linux/slab.h
60320--- linux-2.6.32.46/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60321+++ linux-2.6.32.46/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60322@@ -11,12 +11,20 @@
60323
60324 #include <linux/gfp.h>
60325 #include <linux/types.h>
60326+#include <linux/err.h>
60327
60328 /*
60329 * Flags to pass to kmem_cache_create().
60330 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60331 */
60332 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60333+
60334+#ifdef CONFIG_PAX_USERCOPY
60335+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60336+#else
60337+#define SLAB_USERCOPY 0x00000000UL
60338+#endif
60339+
60340 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60341 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60342 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60343@@ -82,10 +90,13 @@
60344 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60345 * Both make kfree a no-op.
60346 */
60347-#define ZERO_SIZE_PTR ((void *)16)
60348+#define ZERO_SIZE_PTR \
60349+({ \
60350+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60351+ (void *)(-MAX_ERRNO-1L); \
60352+})
60353
60354-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60355- (unsigned long)ZERO_SIZE_PTR)
60356+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60357
60358 /*
60359 * struct kmem_cache related prototypes
60360@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60361 void kfree(const void *);
60362 void kzfree(const void *);
60363 size_t ksize(const void *);
60364+void check_object_size(const void *ptr, unsigned long n, bool to);
60365
60366 /*
60367 * Allocator specific definitions. These are mainly used to establish optimized
60368@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60369
60370 void __init kmem_cache_init_late(void);
60371
60372+#define kmalloc(x, y) \
60373+({ \
60374+ void *___retval; \
60375+ intoverflow_t ___x = (intoverflow_t)x; \
60376+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60377+ ___retval = NULL; \
60378+ else \
60379+ ___retval = kmalloc((size_t)___x, (y)); \
60380+ ___retval; \
60381+})
60382+
60383+#define kmalloc_node(x, y, z) \
60384+({ \
60385+ void *___retval; \
60386+ intoverflow_t ___x = (intoverflow_t)x; \
60387+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60388+ ___retval = NULL; \
60389+ else \
60390+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60391+ ___retval; \
60392+})
60393+
60394+#define kzalloc(x, y) \
60395+({ \
60396+ void *___retval; \
60397+ intoverflow_t ___x = (intoverflow_t)x; \
60398+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60399+ ___retval = NULL; \
60400+ else \
60401+ ___retval = kzalloc((size_t)___x, (y)); \
60402+ ___retval; \
60403+})
60404+
60405 #endif /* _LINUX_SLAB_H */
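
The kmalloc/kmalloc_node/kzalloc wrappers above all follow one shape: widen the requested size to intoverflow_t, refuse anything above ULONG_MAX, and only then call the real allocator with a size_t. The same shape as a standalone userspace sketch, with hypothetical names (overflow_t stands in for intoverflow_t):

#include <limits.h>
#include <stdlib.h>

typedef unsigned long long overflow_t;  /* stand-in for intoverflow_t */

/* Multiply in the wide type first, then make sure the result still fits
 * before handing it down as a size_t. */
static void *checked_alloc_array(size_t count, size_t elem_size)
{
        overflow_t total = (overflow_t)count * elem_size;

        if (total > ULONG_MAX)          /* would truncate on the way down */
                return NULL;
        return malloc((size_t)total);
}
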
60406diff -urNp linux-2.6.32.46/include/linux/slub_def.h linux-2.6.32.46/include/linux/slub_def.h
60407--- linux-2.6.32.46/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60408+++ linux-2.6.32.46/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60409@@ -86,7 +86,7 @@ struct kmem_cache {
60410 struct kmem_cache_order_objects max;
60411 struct kmem_cache_order_objects min;
60412 gfp_t allocflags; /* gfp flags to use on each alloc */
60413- int refcount; /* Refcount for slab cache destroy */
60414+ atomic_t refcount; /* Refcount for slab cache destroy */
60415 void (*ctor)(void *);
60416 int inuse; /* Offset to metadata */
60417 int align; /* Alignment */
60418@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60419 #endif
60420
60421 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60422-void *__kmalloc(size_t size, gfp_t flags);
60423+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60424
60425 #ifdef CONFIG_KMEMTRACE
60426 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60427diff -urNp linux-2.6.32.46/include/linux/sonet.h linux-2.6.32.46/include/linux/sonet.h
60428--- linux-2.6.32.46/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60429+++ linux-2.6.32.46/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60430@@ -61,7 +61,7 @@ struct sonet_stats {
60431 #include <asm/atomic.h>
60432
60433 struct k_sonet_stats {
60434-#define __HANDLE_ITEM(i) atomic_t i
60435+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60436 __SONET_ITEMS
60437 #undef __HANDLE_ITEM
60438 };
60439diff -urNp linux-2.6.32.46/include/linux/sunrpc/cache.h linux-2.6.32.46/include/linux/sunrpc/cache.h
60440--- linux-2.6.32.46/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60441+++ linux-2.6.32.46/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60442@@ -125,7 +125,7 @@ struct cache_detail {
60443 */
60444 struct cache_req {
60445 struct cache_deferred_req *(*defer)(struct cache_req *req);
60446-};
60447+} __no_const;
60448 /* this must be embedded in a deferred_request that is being
60449 * delayed awaiting cache-fill
60450 */
60451diff -urNp linux-2.6.32.46/include/linux/sunrpc/clnt.h linux-2.6.32.46/include/linux/sunrpc/clnt.h
60452--- linux-2.6.32.46/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60453+++ linux-2.6.32.46/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60454@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60455 {
60456 switch (sap->sa_family) {
60457 case AF_INET:
60458- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60459+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60460 case AF_INET6:
60461- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60462+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60463 }
60464 return 0;
60465 }
60466@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60467 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60468 const struct sockaddr *src)
60469 {
60470- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60471+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60472 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60473
60474 dsin->sin_family = ssin->sin_family;
60475@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60476 if (sa->sa_family != AF_INET6)
60477 return 0;
60478
60479- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60480+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60481 }
60482
60483 #endif /* __KERNEL__ */
60484diff -urNp linux-2.6.32.46/include/linux/sunrpc/svc_rdma.h linux-2.6.32.46/include/linux/sunrpc/svc_rdma.h
60485--- linux-2.6.32.46/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60486+++ linux-2.6.32.46/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60487@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60488 extern unsigned int svcrdma_max_requests;
60489 extern unsigned int svcrdma_max_req_size;
60490
60491-extern atomic_t rdma_stat_recv;
60492-extern atomic_t rdma_stat_read;
60493-extern atomic_t rdma_stat_write;
60494-extern atomic_t rdma_stat_sq_starve;
60495-extern atomic_t rdma_stat_rq_starve;
60496-extern atomic_t rdma_stat_rq_poll;
60497-extern atomic_t rdma_stat_rq_prod;
60498-extern atomic_t rdma_stat_sq_poll;
60499-extern atomic_t rdma_stat_sq_prod;
60500+extern atomic_unchecked_t rdma_stat_recv;
60501+extern atomic_unchecked_t rdma_stat_read;
60502+extern atomic_unchecked_t rdma_stat_write;
60503+extern atomic_unchecked_t rdma_stat_sq_starve;
60504+extern atomic_unchecked_t rdma_stat_rq_starve;
60505+extern atomic_unchecked_t rdma_stat_rq_poll;
60506+extern atomic_unchecked_t rdma_stat_rq_prod;
60507+extern atomic_unchecked_t rdma_stat_sq_poll;
60508+extern atomic_unchecked_t rdma_stat_sq_prod;
60509
60510 #define RPCRDMA_VERSION 1
60511
60512diff -urNp linux-2.6.32.46/include/linux/suspend.h linux-2.6.32.46/include/linux/suspend.h
60513--- linux-2.6.32.46/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60514+++ linux-2.6.32.46/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60515@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60516 * which require special recovery actions in that situation.
60517 */
60518 struct platform_suspend_ops {
60519- int (*valid)(suspend_state_t state);
60520- int (*begin)(suspend_state_t state);
60521- int (*prepare)(void);
60522- int (*prepare_late)(void);
60523- int (*enter)(suspend_state_t state);
60524- void (*wake)(void);
60525- void (*finish)(void);
60526- void (*end)(void);
60527- void (*recover)(void);
60528+ int (* const valid)(suspend_state_t state);
60529+ int (* const begin)(suspend_state_t state);
60530+ int (* const prepare)(void);
60531+ int (* const prepare_late)(void);
60532+ int (* const enter)(suspend_state_t state);
60533+ void (* const wake)(void);
60534+ void (* const finish)(void);
60535+ void (* const end)(void);
60536+ void (* const recover)(void);
60537 };
60538
60539 #ifdef CONFIG_SUSPEND
60540@@ -120,7 +120,7 @@ struct platform_suspend_ops {
60541 * suspend_set_ops - set platform dependent suspend operations
60542 * @ops: The new suspend operations to set.
60543 */
60544-extern void suspend_set_ops(struct platform_suspend_ops *ops);
60545+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60546 extern int suspend_valid_only_mem(suspend_state_t state);
60547
60548 /**
60549@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60550 #else /* !CONFIG_SUSPEND */
60551 #define suspend_valid_only_mem NULL
60552
60553-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60554+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60555 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60556 #endif /* !CONFIG_SUSPEND */
60557
60558@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60559 * platforms which require special recovery actions in that situation.
60560 */
60561 struct platform_hibernation_ops {
60562- int (*begin)(void);
60563- void (*end)(void);
60564- int (*pre_snapshot)(void);
60565- void (*finish)(void);
60566- int (*prepare)(void);
60567- int (*enter)(void);
60568- void (*leave)(void);
60569- int (*pre_restore)(void);
60570- void (*restore_cleanup)(void);
60571- void (*recover)(void);
60572+ int (* const begin)(void);
60573+ void (* const end)(void);
60574+ int (* const pre_snapshot)(void);
60575+ void (* const finish)(void);
60576+ int (* const prepare)(void);
60577+ int (* const enter)(void);
60578+ void (* const leave)(void);
60579+ int (* const pre_restore)(void);
60580+ void (* const restore_cleanup)(void);
60581+ void (* const recover)(void);
60582 };
60583
60584 #ifdef CONFIG_HIBERNATION
60585@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
60586 extern void swsusp_unset_page_free(struct page *);
60587 extern unsigned long get_safe_page(gfp_t gfp_mask);
60588
60589-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
60590+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
60591 extern int hibernate(void);
60592 extern bool system_entering_hibernation(void);
60593 #else /* CONFIG_HIBERNATION */
60594@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
60595 static inline void swsusp_set_page_free(struct page *p) {}
60596 static inline void swsusp_unset_page_free(struct page *p) {}
60597
60598-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
60599+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
60600 static inline int hibernate(void) { return -ENOSYS; }
60601 static inline bool system_entering_hibernation(void) { return false; }
60602 #endif /* CONFIG_HIBERNATION */
60603diff -urNp linux-2.6.32.46/include/linux/sysctl.h linux-2.6.32.46/include/linux/sysctl.h
60604--- linux-2.6.32.46/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
60605+++ linux-2.6.32.46/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
60606@@ -164,7 +164,11 @@ enum
60607 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
60608 };
60609
60610-
60611+#ifdef CONFIG_PAX_SOFTMODE
60612+enum {
60613+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
60614+};
60615+#endif
60616
60617 /* CTL_VM names: */
60618 enum
60619@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
60620
60621 extern int proc_dostring(struct ctl_table *, int,
60622 void __user *, size_t *, loff_t *);
60623+extern int proc_dostring_modpriv(struct ctl_table *, int,
60624+ void __user *, size_t *, loff_t *);
60625 extern int proc_dointvec(struct ctl_table *, int,
60626 void __user *, size_t *, loff_t *);
60627 extern int proc_dointvec_minmax(struct ctl_table *, int,
60628@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
60629
60630 extern ctl_handler sysctl_data;
60631 extern ctl_handler sysctl_string;
60632+extern ctl_handler sysctl_string_modpriv;
60633 extern ctl_handler sysctl_intvec;
60634 extern ctl_handler sysctl_jiffies;
60635 extern ctl_handler sysctl_ms_jiffies;
60636diff -urNp linux-2.6.32.46/include/linux/sysfs.h linux-2.6.32.46/include/linux/sysfs.h
60637--- linux-2.6.32.46/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
60638+++ linux-2.6.32.46/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
60639@@ -75,8 +75,8 @@ struct bin_attribute {
60640 };
60641
60642 struct sysfs_ops {
60643- ssize_t (*show)(struct kobject *, struct attribute *,char *);
60644- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
60645+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
60646+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
60647 };
60648
60649 struct sysfs_dirent;
60650diff -urNp linux-2.6.32.46/include/linux/thread_info.h linux-2.6.32.46/include/linux/thread_info.h
60651--- linux-2.6.32.46/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
60652+++ linux-2.6.32.46/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
60653@@ -23,7 +23,7 @@ struct restart_block {
60654 };
60655 /* For futex_wait and futex_wait_requeue_pi */
60656 struct {
60657- u32 *uaddr;
60658+ u32 __user *uaddr;
60659 u32 val;
60660 u32 flags;
60661 u32 bitset;
60662diff -urNp linux-2.6.32.46/include/linux/tty.h linux-2.6.32.46/include/linux/tty.h
60663--- linux-2.6.32.46/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
60664+++ linux-2.6.32.46/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
60665@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
60666 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
60667 extern void tty_ldisc_enable(struct tty_struct *tty);
60668
60669-
60670 /* n_tty.c */
60671 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
60672
60673diff -urNp linux-2.6.32.46/include/linux/tty_ldisc.h linux-2.6.32.46/include/linux/tty_ldisc.h
60674--- linux-2.6.32.46/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
60675+++ linux-2.6.32.46/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
60676@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
60677
60678 struct module *owner;
60679
60680- int refcount;
60681+ atomic_t refcount;
60682 };
60683
60684 struct tty_ldisc {
60685diff -urNp linux-2.6.32.46/include/linux/types.h linux-2.6.32.46/include/linux/types.h
60686--- linux-2.6.32.46/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
60687+++ linux-2.6.32.46/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
60688@@ -191,10 +191,26 @@ typedef struct {
60689 volatile int counter;
60690 } atomic_t;
60691
60692+#ifdef CONFIG_PAX_REFCOUNT
60693+typedef struct {
60694+ volatile int counter;
60695+} atomic_unchecked_t;
60696+#else
60697+typedef atomic_t atomic_unchecked_t;
60698+#endif
60699+
60700 #ifdef CONFIG_64BIT
60701 typedef struct {
60702 volatile long counter;
60703 } atomic64_t;
60704+
60705+#ifdef CONFIG_PAX_REFCOUNT
60706+typedef struct {
60707+ volatile long counter;
60708+} atomic64_unchecked_t;
60709+#else
60710+typedef atomic64_t atomic64_unchecked_t;
60711+#endif
60712 #endif
60713
60714 struct ustat {
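
The pairing above is deliberate: counters that must never wrap stay atomic_t/atomic64_t and pick up PAX_REFCOUNT's overflow checking, while counters whose wraparound is harmless (statistics, generation numbers) are converted to the *_unchecked_t variants throughout the rest of the patch. A small hedged illustration (both counters are hypothetical):

static atomic_t obj_refcount = ATOMIC_INIT(1);          /* must never wrap: checked */
static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);  /* wrap is harmless: unchecked */

static void example_account_packet(void)
{
        atomic_inc_unchecked(&rx_packets);      /* _unchecked accessors pair with the type */
        atomic_inc(&obj_refcount);
}
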
60715diff -urNp linux-2.6.32.46/include/linux/uaccess.h linux-2.6.32.46/include/linux/uaccess.h
60716--- linux-2.6.32.46/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
60717+++ linux-2.6.32.46/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
60718@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
60719 long ret; \
60720 mm_segment_t old_fs = get_fs(); \
60721 \
60722- set_fs(KERNEL_DS); \
60723 pagefault_disable(); \
60724+ set_fs(KERNEL_DS); \
60725 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
60726- pagefault_enable(); \
60727 set_fs(old_fs); \
60728+ pagefault_enable(); \
60729 ret; \
60730 })
60731
60732@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
60733 * Safely read from address @src to the buffer at @dst. If a kernel fault
60734 * happens, handle that and return -EFAULT.
60735 */
60736-extern long probe_kernel_read(void *dst, void *src, size_t size);
60737+extern long probe_kernel_read(void *dst, const void *src, size_t size);
60738
60739 /*
60740 * probe_kernel_write(): safely attempt to write to a location
60741@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
60742 * Safely write to address @dst from the buffer at @src. If a kernel fault
60743 * happens, handle that and return -EFAULT.
60744 */
60745-extern long probe_kernel_write(void *dst, void *src, size_t size);
60746+extern long probe_kernel_write(void *dst, const void *src, size_t size);
60747
60748 #endif /* __LINUX_UACCESS_H__ */
60749diff -urNp linux-2.6.32.46/include/linux/unaligned/access_ok.h linux-2.6.32.46/include/linux/unaligned/access_ok.h
60750--- linux-2.6.32.46/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
60751+++ linux-2.6.32.46/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
60752@@ -6,32 +6,32 @@
60753
60754 static inline u16 get_unaligned_le16(const void *p)
60755 {
60756- return le16_to_cpup((__le16 *)p);
60757+ return le16_to_cpup((const __le16 *)p);
60758 }
60759
60760 static inline u32 get_unaligned_le32(const void *p)
60761 {
60762- return le32_to_cpup((__le32 *)p);
60763+ return le32_to_cpup((const __le32 *)p);
60764 }
60765
60766 static inline u64 get_unaligned_le64(const void *p)
60767 {
60768- return le64_to_cpup((__le64 *)p);
60769+ return le64_to_cpup((const __le64 *)p);
60770 }
60771
60772 static inline u16 get_unaligned_be16(const void *p)
60773 {
60774- return be16_to_cpup((__be16 *)p);
60775+ return be16_to_cpup((const __be16 *)p);
60776 }
60777
60778 static inline u32 get_unaligned_be32(const void *p)
60779 {
60780- return be32_to_cpup((__be32 *)p);
60781+ return be32_to_cpup((const __be32 *)p);
60782 }
60783
60784 static inline u64 get_unaligned_be64(const void *p)
60785 {
60786- return be64_to_cpup((__be64 *)p);
60787+ return be64_to_cpup((const __be64 *)p);
60788 }
60789
60790 static inline void put_unaligned_le16(u16 val, void *p)
60791diff -urNp linux-2.6.32.46/include/linux/vmalloc.h linux-2.6.32.46/include/linux/vmalloc.h
60792--- linux-2.6.32.46/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
60793+++ linux-2.6.32.46/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
60794@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
60795 #define VM_MAP 0x00000004 /* vmap()ed pages */
60796 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
60797 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
60798+
60799+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
60800+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
60801+#endif
60802+
60803 /* bits [20..32] reserved for arch specific ioremap internals */
60804
60805 /*
60806@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
60807
60808 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
60809
60810+#define vmalloc(x) \
60811+({ \
60812+ void *___retval; \
60813+ intoverflow_t ___x = (intoverflow_t)x; \
60814+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
60815+ ___retval = NULL; \
60816+ else \
60817+ ___retval = vmalloc((unsigned long)___x); \
60818+ ___retval; \
60819+})
60820+
60821+#define __vmalloc(x, y, z) \
60822+({ \
60823+ void *___retval; \
60824+ intoverflow_t ___x = (intoverflow_t)x; \
60825+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
60826+ ___retval = NULL; \
60827+ else \
60828+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
60829+ ___retval; \
60830+})
60831+
60832+#define vmalloc_user(x) \
60833+({ \
60834+ void *___retval; \
60835+ intoverflow_t ___x = (intoverflow_t)x; \
60836+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
60837+ ___retval = NULL; \
60838+ else \
60839+ ___retval = vmalloc_user((unsigned long)___x); \
60840+ ___retval; \
60841+})
60842+
60843+#define vmalloc_exec(x) \
60844+({ \
60845+ void *___retval; \
60846+ intoverflow_t ___x = (intoverflow_t)x; \
60847+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
60848+ ___retval = NULL; \
60849+ else \
60850+ ___retval = vmalloc_exec((unsigned long)___x); \
60851+ ___retval; \
60852+})
60853+
60854+#define vmalloc_node(x, y) \
60855+({ \
60856+ void *___retval; \
60857+ intoverflow_t ___x = (intoverflow_t)x; \
60858+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
60859+ ___retval = NULL; \
60860+ else \
60861+ ___retval = vmalloc_node((unsigned long)___x, (y));\
60862+ ___retval; \
60863+})
60864+
60865+#define vmalloc_32(x) \
60866+({ \
60867+ void *___retval; \
60868+ intoverflow_t ___x = (intoverflow_t)x; \
60869+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
60870+ ___retval = NULL; \
60871+ else \
60872+ ___retval = vmalloc_32((unsigned long)___x); \
60873+ ___retval; \
60874+})
60875+
60876+#define vmalloc_32_user(x) \
60877+({ \
60878+ void *___retval; \
60879+ intoverflow_t ___x = (intoverflow_t)x; \
60880+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
60881+ ___retval = NULL; \
60882+ else \
60883+ ___retval = vmalloc_32_user((unsigned long)___x);\
60884+ ___retval; \
60885+})
60886+
60887 #endif /* _LINUX_VMALLOC_H */
60888diff -urNp linux-2.6.32.46/include/linux/vmstat.h linux-2.6.32.46/include/linux/vmstat.h
60889--- linux-2.6.32.46/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
60890+++ linux-2.6.32.46/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
60891@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
60892 /*
60893 * Zone based page accounting with per cpu differentials.
60894 */
60895-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60896+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60897
60898 static inline void zone_page_state_add(long x, struct zone *zone,
60899 enum zone_stat_item item)
60900 {
60901- atomic_long_add(x, &zone->vm_stat[item]);
60902- atomic_long_add(x, &vm_stat[item]);
60903+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
60904+ atomic_long_add_unchecked(x, &vm_stat[item]);
60905 }
60906
60907 static inline unsigned long global_page_state(enum zone_stat_item item)
60908 {
60909- long x = atomic_long_read(&vm_stat[item]);
60910+ long x = atomic_long_read_unchecked(&vm_stat[item]);
60911 #ifdef CONFIG_SMP
60912 if (x < 0)
60913 x = 0;
60914@@ -158,7 +158,7 @@ static inline unsigned long global_page_
60915 static inline unsigned long zone_page_state(struct zone *zone,
60916 enum zone_stat_item item)
60917 {
60918- long x = atomic_long_read(&zone->vm_stat[item]);
60919+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60920 #ifdef CONFIG_SMP
60921 if (x < 0)
60922 x = 0;
60923@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
60924 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
60925 enum zone_stat_item item)
60926 {
60927- long x = atomic_long_read(&zone->vm_stat[item]);
60928+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
60929
60930 #ifdef CONFIG_SMP
60931 int cpu;
60932@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
60933
60934 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
60935 {
60936- atomic_long_inc(&zone->vm_stat[item]);
60937- atomic_long_inc(&vm_stat[item]);
60938+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
60939+ atomic_long_inc_unchecked(&vm_stat[item]);
60940 }
60941
60942 static inline void __inc_zone_page_state(struct page *page,
60943@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
60944
60945 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
60946 {
60947- atomic_long_dec(&zone->vm_stat[item]);
60948- atomic_long_dec(&vm_stat[item]);
60949+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
60950+ atomic_long_dec_unchecked(&vm_stat[item]);
60951 }
60952
60953 static inline void __dec_zone_page_state(struct page *page,
60954diff -urNp linux-2.6.32.46/include/media/saa7146_vv.h linux-2.6.32.46/include/media/saa7146_vv.h
60955--- linux-2.6.32.46/include/media/saa7146_vv.h 2011-03-27 14:31:47.000000000 -0400
60956+++ linux-2.6.32.46/include/media/saa7146_vv.h 2011-08-23 21:22:38.000000000 -0400
60957@@ -167,7 +167,7 @@ struct saa7146_ext_vv
60958 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
60959
60960 /* the extension can override this */
60961- struct v4l2_ioctl_ops ops;
60962+ v4l2_ioctl_ops_no_const ops;
60963 /* pointer to the saa7146 core ops */
60964 const struct v4l2_ioctl_ops *core_ops;
60965
60966diff -urNp linux-2.6.32.46/include/media/v4l2-dev.h linux-2.6.32.46/include/media/v4l2-dev.h
60967--- linux-2.6.32.46/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
60968+++ linux-2.6.32.46/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
60969@@ -34,7 +34,7 @@ struct v4l2_device;
60970 #define V4L2_FL_UNREGISTERED (0)
60971
60972 struct v4l2_file_operations {
60973- struct module *owner;
60974+ struct module * const owner;
60975 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
60976 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
60977 unsigned int (*poll) (struct file *, struct poll_table_struct *);
60978diff -urNp linux-2.6.32.46/include/media/v4l2-device.h linux-2.6.32.46/include/media/v4l2-device.h
60979--- linux-2.6.32.46/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
60980+++ linux-2.6.32.46/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
60981@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
60982 this function returns 0. If the name ends with a digit (e.g. cx18),
60983 then the name will be set to cx18-0 since cx180 looks really odd. */
60984 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
60985- atomic_t *instance);
60986+ atomic_unchecked_t *instance);
60987
60988 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
60989 Since the parent disappears this ensures that v4l2_dev doesn't have an
60990diff -urNp linux-2.6.32.46/include/media/v4l2-ioctl.h linux-2.6.32.46/include/media/v4l2-ioctl.h
60991--- linux-2.6.32.46/include/media/v4l2-ioctl.h 2011-03-27 14:31:47.000000000 -0400
60992+++ linux-2.6.32.46/include/media/v4l2-ioctl.h 2011-08-23 21:22:38.000000000 -0400
60993@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
60994 long (*vidioc_default) (struct file *file, void *fh,
60995 int cmd, void *arg);
60996 };
60997+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
60998
60999
61000 /* v4l debugging and diagnostics */
61001diff -urNp linux-2.6.32.46/include/net/flow.h linux-2.6.32.46/include/net/flow.h
61002--- linux-2.6.32.46/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61003+++ linux-2.6.32.46/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61004@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61005 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61006 u8 dir, flow_resolve_t resolver);
61007 extern void flow_cache_flush(void);
61008-extern atomic_t flow_cache_genid;
61009+extern atomic_unchecked_t flow_cache_genid;
61010
61011 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61012 {
61013diff -urNp linux-2.6.32.46/include/net/inetpeer.h linux-2.6.32.46/include/net/inetpeer.h
61014--- linux-2.6.32.46/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61015+++ linux-2.6.32.46/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61016@@ -24,7 +24,7 @@ struct inet_peer
61017 __u32 dtime; /* the time of last use of not
61018 * referenced entries */
61019 atomic_t refcnt;
61020- atomic_t rid; /* Frag reception counter */
61021+ atomic_unchecked_t rid; /* Frag reception counter */
61022 __u32 tcp_ts;
61023 unsigned long tcp_ts_stamp;
61024 };
61025diff -urNp linux-2.6.32.46/include/net/ip_vs.h linux-2.6.32.46/include/net/ip_vs.h
61026--- linux-2.6.32.46/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61027+++ linux-2.6.32.46/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61028@@ -365,7 +365,7 @@ struct ip_vs_conn {
61029 struct ip_vs_conn *control; /* Master control connection */
61030 atomic_t n_control; /* Number of controlled ones */
61031 struct ip_vs_dest *dest; /* real server */
61032- atomic_t in_pkts; /* incoming packet counter */
61033+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61034
61035 /* packet transmitter for different forwarding methods. If it
61036 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61037@@ -466,7 +466,7 @@ struct ip_vs_dest {
61038 union nf_inet_addr addr; /* IP address of the server */
61039 __be16 port; /* port number of the server */
61040 volatile unsigned flags; /* dest status flags */
61041- atomic_t conn_flags; /* flags to copy to conn */
61042+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61043 atomic_t weight; /* server weight */
61044
61045 atomic_t refcnt; /* reference counter */
61046diff -urNp linux-2.6.32.46/include/net/irda/ircomm_core.h linux-2.6.32.46/include/net/irda/ircomm_core.h
61047--- linux-2.6.32.46/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61048+++ linux-2.6.32.46/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61049@@ -51,7 +51,7 @@ typedef struct {
61050 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61051 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61052 struct ircomm_info *);
61053-} call_t;
61054+} __no_const call_t;
61055
61056 struct ircomm_cb {
61057 irda_queue_t queue;
61058diff -urNp linux-2.6.32.46/include/net/irda/ircomm_tty.h linux-2.6.32.46/include/net/irda/ircomm_tty.h
61059--- linux-2.6.32.46/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61060+++ linux-2.6.32.46/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61061@@ -35,6 +35,7 @@
61062 #include <linux/termios.h>
61063 #include <linux/timer.h>
61064 #include <linux/tty.h> /* struct tty_struct */
61065+#include <asm/local.h>
61066
61067 #include <net/irda/irias_object.h>
61068 #include <net/irda/ircomm_core.h>
61069@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61070 unsigned short close_delay;
61071 unsigned short closing_wait; /* time to wait before closing */
61072
61073- int open_count;
61074- int blocked_open; /* # of blocked opens */
61075+ local_t open_count;
61076+ local_t blocked_open; /* # of blocked opens */
61077
61078 /* Protect concurent access to :
61079 * o self->open_count
61080diff -urNp linux-2.6.32.46/include/net/iucv/af_iucv.h linux-2.6.32.46/include/net/iucv/af_iucv.h
61081--- linux-2.6.32.46/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61082+++ linux-2.6.32.46/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61083@@ -87,7 +87,7 @@ struct iucv_sock {
61084 struct iucv_sock_list {
61085 struct hlist_head head;
61086 rwlock_t lock;
61087- atomic_t autobind_name;
61088+ atomic_unchecked_t autobind_name;
61089 };
61090
61091 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61092diff -urNp linux-2.6.32.46/include/net/lapb.h linux-2.6.32.46/include/net/lapb.h
61093--- linux-2.6.32.46/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61094+++ linux-2.6.32.46/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61095@@ -95,7 +95,7 @@ struct lapb_cb {
61096 struct sk_buff_head write_queue;
61097 struct sk_buff_head ack_queue;
61098 unsigned char window;
61099- struct lapb_register_struct callbacks;
61100+ struct lapb_register_struct *callbacks;
61101
61102 /* FRMR control information */
61103 struct lapb_frame frmr_data;
61104diff -urNp linux-2.6.32.46/include/net/neighbour.h linux-2.6.32.46/include/net/neighbour.h
61105--- linux-2.6.32.46/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61106+++ linux-2.6.32.46/include/net/neighbour.h 2011-08-26 20:29:08.000000000 -0400
61107@@ -131,7 +131,7 @@ struct neigh_ops
61108 int (*connected_output)(struct sk_buff*);
61109 int (*hh_output)(struct sk_buff*);
61110 int (*queue_xmit)(struct sk_buff*);
61111-};
61112+} __do_const;
61113
61114 struct pneigh_entry
61115 {
61116diff -urNp linux-2.6.32.46/include/net/netlink.h linux-2.6.32.46/include/net/netlink.h
61117--- linux-2.6.32.46/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61118+++ linux-2.6.32.46/include/net/netlink.h 2011-08-21 18:08:11.000000000 -0400
61119@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
61120 {
61121 return (remaining >= (int) sizeof(struct nlmsghdr) &&
61122 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
61123- nlh->nlmsg_len <= remaining);
61124+ nlh->nlmsg_len <= (unsigned int)remaining);
61125 }
61126
61127 /**
61128@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61129 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61130 {
61131 if (mark)
61132- skb_trim(skb, (unsigned char *) mark - skb->data);
61133+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61134 }
61135
61136 /**
61137diff -urNp linux-2.6.32.46/include/net/netns/ipv4.h linux-2.6.32.46/include/net/netns/ipv4.h
61138--- linux-2.6.32.46/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61139+++ linux-2.6.32.46/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61140@@ -54,7 +54,7 @@ struct netns_ipv4 {
61141 int current_rt_cache_rebuild_count;
61142
61143 struct timer_list rt_secret_timer;
61144- atomic_t rt_genid;
61145+ atomic_unchecked_t rt_genid;
61146
61147 #ifdef CONFIG_IP_MROUTE
61148 struct sock *mroute_sk;
61149diff -urNp linux-2.6.32.46/include/net/sctp/sctp.h linux-2.6.32.46/include/net/sctp/sctp.h
61150--- linux-2.6.32.46/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61151+++ linux-2.6.32.46/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61152@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61153
61154 #else /* SCTP_DEBUG */
61155
61156-#define SCTP_DEBUG_PRINTK(whatever...)
61157-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61158+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61159+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61160 #define SCTP_ENABLE_DEBUG
61161 #define SCTP_DISABLE_DEBUG
61162 #define SCTP_ASSERT(expr, str, func)
61163diff -urNp linux-2.6.32.46/include/net/secure_seq.h linux-2.6.32.46/include/net/secure_seq.h
61164--- linux-2.6.32.46/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61165+++ linux-2.6.32.46/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61166@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61167 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61168 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61169 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61170- __be16 dport);
61171+ __be16 dport);
61172 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61173 __be16 sport, __be16 dport);
61174 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61175- __be16 sport, __be16 dport);
61176+ __be16 sport, __be16 dport);
61177 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61178- __be16 sport, __be16 dport);
61179+ __be16 sport, __be16 dport);
61180 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61181- __be16 sport, __be16 dport);
61182+ __be16 sport, __be16 dport);
61183
61184 #endif /* _NET_SECURE_SEQ */
61185diff -urNp linux-2.6.32.46/include/net/sock.h linux-2.6.32.46/include/net/sock.h
61186--- linux-2.6.32.46/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61187+++ linux-2.6.32.46/include/net/sock.h 2011-08-21 17:24:37.000000000 -0400
61188@@ -272,7 +272,7 @@ struct sock {
61189 rwlock_t sk_callback_lock;
61190 int sk_err,
61191 sk_err_soft;
61192- atomic_t sk_drops;
61193+ atomic_unchecked_t sk_drops;
61194 unsigned short sk_ack_backlog;
61195 unsigned short sk_max_ack_backlog;
61196 __u32 sk_priority;
61197@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
61198 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
61199 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
61200 #else
61201-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
61202+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
61203 int inc)
61204 {
61205 }
61206diff -urNp linux-2.6.32.46/include/net/tcp.h linux-2.6.32.46/include/net/tcp.h
61207--- linux-2.6.32.46/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61208+++ linux-2.6.32.46/include/net/tcp.h 2011-08-23 21:29:10.000000000 -0400
61209@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
61210 struct tcp_seq_afinfo {
61211 char *name;
61212 sa_family_t family;
61213- struct file_operations seq_fops;
61214- struct seq_operations seq_ops;
61215+ file_operations_no_const seq_fops;
61216+ seq_operations_no_const seq_ops;
61217 };
61218
61219 struct tcp_iter_state {
61220diff -urNp linux-2.6.32.46/include/net/udp.h linux-2.6.32.46/include/net/udp.h
61221--- linux-2.6.32.46/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61222+++ linux-2.6.32.46/include/net/udp.h 2011-08-23 21:29:34.000000000 -0400
61223@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
61224 char *name;
61225 sa_family_t family;
61226 struct udp_table *udp_table;
61227- struct file_operations seq_fops;
61228- struct seq_operations seq_ops;
61229+ file_operations_no_const seq_fops;
61230+ seq_operations_no_const seq_ops;
61231 };
61232
61233 struct udp_iter_state {
61234diff -urNp linux-2.6.32.46/include/rdma/iw_cm.h linux-2.6.32.46/include/rdma/iw_cm.h
61235--- linux-2.6.32.46/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61236+++ linux-2.6.32.46/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61237@@ -129,7 +129,7 @@ struct iw_cm_verbs {
61238 int backlog);
61239
61240 int (*destroy_listen)(struct iw_cm_id *cm_id);
61241-};
61242+} __no_const;
61243
61244 /**
61245 * iw_create_cm_id - Create an IW CM identifier.
61246diff -urNp linux-2.6.32.46/include/scsi/libfc.h linux-2.6.32.46/include/scsi/libfc.h
61247--- linux-2.6.32.46/include/scsi/libfc.h 2011-03-27 14:31:47.000000000 -0400
61248+++ linux-2.6.32.46/include/scsi/libfc.h 2011-08-23 21:22:38.000000000 -0400
61249@@ -675,6 +675,7 @@ struct libfc_function_template {
61250 */
61251 void (*disc_stop_final) (struct fc_lport *);
61252 };
61253+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61254
61255 /* information used by the discovery layer */
61256 struct fc_disc {
61257@@ -707,7 +708,7 @@ struct fc_lport {
61258 struct fc_disc disc;
61259
61260 /* Operational Information */
61261- struct libfc_function_template tt;
61262+ libfc_function_template_no_const tt;
61263 u8 link_up;
61264 u8 qfull;
61265 enum fc_lport_state state;
61266diff -urNp linux-2.6.32.46/include/scsi/scsi_device.h linux-2.6.32.46/include/scsi/scsi_device.h
61267--- linux-2.6.32.46/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61268+++ linux-2.6.32.46/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61269@@ -156,9 +156,9 @@ struct scsi_device {
61270 unsigned int max_device_blocked; /* what device_blocked counts down from */
61271 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61272
61273- atomic_t iorequest_cnt;
61274- atomic_t iodone_cnt;
61275- atomic_t ioerr_cnt;
61276+ atomic_unchecked_t iorequest_cnt;
61277+ atomic_unchecked_t iodone_cnt;
61278+ atomic_unchecked_t ioerr_cnt;
61279
61280 struct device sdev_gendev,
61281 sdev_dev;
61282diff -urNp linux-2.6.32.46/include/scsi/scsi_transport_fc.h linux-2.6.32.46/include/scsi/scsi_transport_fc.h
61283--- linux-2.6.32.46/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61284+++ linux-2.6.32.46/include/scsi/scsi_transport_fc.h 2011-08-26 20:19:09.000000000 -0400
61285@@ -708,7 +708,7 @@ struct fc_function_template {
61286 unsigned long show_host_system_hostname:1;
61287
61288 unsigned long disable_target_scan:1;
61289-};
61290+} __do_const;
61291
61292
61293 /**
61294diff -urNp linux-2.6.32.46/include/sound/ac97_codec.h linux-2.6.32.46/include/sound/ac97_codec.h
61295--- linux-2.6.32.46/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61296+++ linux-2.6.32.46/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61297@@ -419,15 +419,15 @@
61298 struct snd_ac97;
61299
61300 struct snd_ac97_build_ops {
61301- int (*build_3d) (struct snd_ac97 *ac97);
61302- int (*build_specific) (struct snd_ac97 *ac97);
61303- int (*build_spdif) (struct snd_ac97 *ac97);
61304- int (*build_post_spdif) (struct snd_ac97 *ac97);
61305+ int (* const build_3d) (struct snd_ac97 *ac97);
61306+ int (* const build_specific) (struct snd_ac97 *ac97);
61307+ int (* const build_spdif) (struct snd_ac97 *ac97);
61308+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
61309 #ifdef CONFIG_PM
61310- void (*suspend) (struct snd_ac97 *ac97);
61311- void (*resume) (struct snd_ac97 *ac97);
61312+ void (* const suspend) (struct snd_ac97 *ac97);
61313+ void (* const resume) (struct snd_ac97 *ac97);
61314 #endif
61315- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61316+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61317 };
61318
61319 struct snd_ac97_bus_ops {
61320@@ -477,7 +477,7 @@ struct snd_ac97_template {
61321
61322 struct snd_ac97 {
61323 /* -- lowlevel (hardware) driver specific -- */
61324- struct snd_ac97_build_ops * build_ops;
61325+ const struct snd_ac97_build_ops * build_ops;
61326 void *private_data;
61327 void (*private_free) (struct snd_ac97 *ac97);
61328 /* --- */
61329diff -urNp linux-2.6.32.46/include/sound/ak4xxx-adda.h linux-2.6.32.46/include/sound/ak4xxx-adda.h
61330--- linux-2.6.32.46/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61331+++ linux-2.6.32.46/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61332@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61333 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61334 unsigned char val);
61335 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61336-};
61337+} __no_const;
61338
61339 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61340
61341diff -urNp linux-2.6.32.46/include/sound/hwdep.h linux-2.6.32.46/include/sound/hwdep.h
61342--- linux-2.6.32.46/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61343+++ linux-2.6.32.46/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61344@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61345 struct snd_hwdep_dsp_status *status);
61346 int (*dsp_load)(struct snd_hwdep *hw,
61347 struct snd_hwdep_dsp_image *image);
61348-};
61349+} __no_const;
61350
61351 struct snd_hwdep {
61352 struct snd_card *card;
61353diff -urNp linux-2.6.32.46/include/sound/info.h linux-2.6.32.46/include/sound/info.h
61354--- linux-2.6.32.46/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61355+++ linux-2.6.32.46/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61356@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61357 struct snd_info_buffer *buffer);
61358 void (*write)(struct snd_info_entry *entry,
61359 struct snd_info_buffer *buffer);
61360-};
61361+} __no_const;
61362
61363 struct snd_info_entry_ops {
61364 int (*open)(struct snd_info_entry *entry,
61365diff -urNp linux-2.6.32.46/include/sound/pcm.h linux-2.6.32.46/include/sound/pcm.h
61366--- linux-2.6.32.46/include/sound/pcm.h 2011-03-27 14:31:47.000000000 -0400
61367+++ linux-2.6.32.46/include/sound/pcm.h 2011-08-23 21:22:38.000000000 -0400
61368@@ -80,6 +80,7 @@ struct snd_pcm_ops {
61369 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61370 int (*ack)(struct snd_pcm_substream *substream);
61371 };
61372+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61373
61374 /*
61375 *
61376diff -urNp linux-2.6.32.46/include/sound/sb16_csp.h linux-2.6.32.46/include/sound/sb16_csp.h
61377--- linux-2.6.32.46/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61378+++ linux-2.6.32.46/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61379@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61380 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61381 int (*csp_stop) (struct snd_sb_csp * p);
61382 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61383-};
61384+} __no_const;
61385
61386 /*
61387 * CSP private data
61388diff -urNp linux-2.6.32.46/include/sound/ymfpci.h linux-2.6.32.46/include/sound/ymfpci.h
61389--- linux-2.6.32.46/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61390+++ linux-2.6.32.46/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61391@@ -358,7 +358,7 @@ struct snd_ymfpci {
61392 spinlock_t reg_lock;
61393 spinlock_t voice_lock;
61394 wait_queue_head_t interrupt_sleep;
61395- atomic_t interrupt_sleep_count;
61396+ atomic_unchecked_t interrupt_sleep_count;
61397 struct snd_info_entry *proc_entry;
61398 const struct firmware *dsp_microcode;
61399 const struct firmware *controller_microcode;
61400diff -urNp linux-2.6.32.46/include/trace/events/irq.h linux-2.6.32.46/include/trace/events/irq.h
61401--- linux-2.6.32.46/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61402+++ linux-2.6.32.46/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61403@@ -34,7 +34,7 @@
61404 */
61405 TRACE_EVENT(irq_handler_entry,
61406
61407- TP_PROTO(int irq, struct irqaction *action),
61408+ TP_PROTO(int irq, const struct irqaction *action),
61409
61410 TP_ARGS(irq, action),
61411
61412@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61413 */
61414 TRACE_EVENT(irq_handler_exit,
61415
61416- TP_PROTO(int irq, struct irqaction *action, int ret),
61417+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61418
61419 TP_ARGS(irq, action, ret),
61420
61421@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61422 */
61423 TRACE_EVENT(softirq_entry,
61424
61425- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61426+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61427
61428 TP_ARGS(h, vec),
61429
61430@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61431 */
61432 TRACE_EVENT(softirq_exit,
61433
61434- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61435+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61436
61437 TP_ARGS(h, vec),
61438
61439diff -urNp linux-2.6.32.46/include/video/uvesafb.h linux-2.6.32.46/include/video/uvesafb.h
61440--- linux-2.6.32.46/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61441+++ linux-2.6.32.46/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61442@@ -177,6 +177,7 @@ struct uvesafb_par {
61443 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61444 u8 pmi_setpal; /* PMI for palette changes */
61445 u16 *pmi_base; /* protected mode interface location */
61446+ u8 *pmi_code; /* protected mode code location */
61447 void *pmi_start;
61448 void *pmi_pal;
61449 u8 *vbe_state_orig; /*
61450diff -urNp linux-2.6.32.46/init/do_mounts.c linux-2.6.32.46/init/do_mounts.c
61451--- linux-2.6.32.46/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61452+++ linux-2.6.32.46/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61453@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61454
61455 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61456 {
61457- int err = sys_mount(name, "/root", fs, flags, data);
61458+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61459 if (err)
61460 return err;
61461
61462- sys_chdir("/root");
61463+ sys_chdir((__force const char __user *)"/root");
61464 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61465 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61466 current->fs->pwd.mnt->mnt_sb->s_type->name,
61467@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61468 va_start(args, fmt);
61469 vsprintf(buf, fmt, args);
61470 va_end(args);
61471- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61472+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61473 if (fd >= 0) {
61474 sys_ioctl(fd, FDEJECT, 0);
61475 sys_close(fd);
61476 }
61477 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61478- fd = sys_open("/dev/console", O_RDWR, 0);
61479+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61480 if (fd >= 0) {
61481 sys_ioctl(fd, TCGETS, (long)&termios);
61482 termios.c_lflag &= ~ICANON;
61483 sys_ioctl(fd, TCSETSF, (long)&termios);
61484- sys_read(fd, &c, 1);
61485+ sys_read(fd, (char __user *)&c, 1);
61486 termios.c_lflag |= ICANON;
61487 sys_ioctl(fd, TCSETSF, (long)&termios);
61488 sys_close(fd);
61489@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61490 mount_root();
61491 out:
61492 devtmpfs_mount("dev");
61493- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61494- sys_chroot(".");
61495+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61496+ sys_chroot((__force char __user *)".");
61497 }
61498diff -urNp linux-2.6.32.46/init/do_mounts.h linux-2.6.32.46/init/do_mounts.h
61499--- linux-2.6.32.46/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61500+++ linux-2.6.32.46/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61501@@ -15,15 +15,15 @@ extern int root_mountflags;
61502
61503 static inline int create_dev(char *name, dev_t dev)
61504 {
61505- sys_unlink(name);
61506- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61507+ sys_unlink((__force char __user *)name);
61508+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61509 }
61510
61511 #if BITS_PER_LONG == 32
61512 static inline u32 bstat(char *name)
61513 {
61514 struct stat64 stat;
61515- if (sys_stat64(name, &stat) != 0)
61516+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61517 return 0;
61518 if (!S_ISBLK(stat.st_mode))
61519 return 0;
61520diff -urNp linux-2.6.32.46/init/do_mounts_initrd.c linux-2.6.32.46/init/do_mounts_initrd.c
61521--- linux-2.6.32.46/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61522+++ linux-2.6.32.46/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61523@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61524 sys_close(old_fd);sys_close(root_fd);
61525 sys_close(0);sys_close(1);sys_close(2);
61526 sys_setsid();
61527- (void) sys_open("/dev/console",O_RDWR,0);
61528+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61529 (void) sys_dup(0);
61530 (void) sys_dup(0);
61531 return kernel_execve(shell, argv, envp_init);
61532@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61533 create_dev("/dev/root.old", Root_RAM0);
61534 /* mount initrd on rootfs' /root */
61535 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61536- sys_mkdir("/old", 0700);
61537- root_fd = sys_open("/", 0, 0);
61538- old_fd = sys_open("/old", 0, 0);
61539+ sys_mkdir((__force const char __user *)"/old", 0700);
61540+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
61541+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61542 /* move initrd over / and chdir/chroot in initrd root */
61543- sys_chdir("/root");
61544- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61545- sys_chroot(".");
61546+ sys_chdir((__force const char __user *)"/root");
61547+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61548+ sys_chroot((__force const char __user *)".");
61549
61550 /*
61551 * In case that a resume from disk is carried out by linuxrc or one of
61552@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61553
61554 /* move initrd to rootfs' /old */
61555 sys_fchdir(old_fd);
61556- sys_mount("/", ".", NULL, MS_MOVE, NULL);
61557+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
61558 /* switch root and cwd back to / of rootfs */
61559 sys_fchdir(root_fd);
61560- sys_chroot(".");
61561+ sys_chroot((__force const char __user *)".");
61562 sys_close(old_fd);
61563 sys_close(root_fd);
61564
61565 if (new_decode_dev(real_root_dev) == Root_RAM0) {
61566- sys_chdir("/old");
61567+ sys_chdir((__force const char __user *)"/old");
61568 return;
61569 }
61570
61571@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
61572 mount_root();
61573
61574 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
61575- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
61576+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
61577 if (!error)
61578 printk("okay\n");
61579 else {
61580- int fd = sys_open("/dev/root.old", O_RDWR, 0);
61581+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
61582 if (error == -ENOENT)
61583 printk("/initrd does not exist. Ignored.\n");
61584 else
61585 printk("failed\n");
61586 printk(KERN_NOTICE "Unmounting old root\n");
61587- sys_umount("/old", MNT_DETACH);
61588+ sys_umount((__force char __user *)"/old", MNT_DETACH);
61589 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
61590 if (fd < 0) {
61591 error = fd;
61592@@ -119,11 +119,11 @@ int __init initrd_load(void)
61593 * mounted in the normal path.
61594 */
61595 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
61596- sys_unlink("/initrd.image");
61597+ sys_unlink((__force const char __user *)"/initrd.image");
61598 handle_initrd();
61599 return 1;
61600 }
61601 }
61602- sys_unlink("/initrd.image");
61603+ sys_unlink((__force const char __user *)"/initrd.image");
61604 return 0;
61605 }
61606diff -urNp linux-2.6.32.46/init/do_mounts_md.c linux-2.6.32.46/init/do_mounts_md.c
61607--- linux-2.6.32.46/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
61608+++ linux-2.6.32.46/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
61609@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
61610 partitioned ? "_d" : "", minor,
61611 md_setup_args[ent].device_names);
61612
61613- fd = sys_open(name, 0, 0);
61614+ fd = sys_open((__force char __user *)name, 0, 0);
61615 if (fd < 0) {
61616 printk(KERN_ERR "md: open failed - cannot start "
61617 "array %s\n", name);
61618@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
61619 * array without it
61620 */
61621 sys_close(fd);
61622- fd = sys_open(name, 0, 0);
61623+ fd = sys_open((__force char __user *)name, 0, 0);
61624 sys_ioctl(fd, BLKRRPART, 0);
61625 }
61626 sys_close(fd);
61627@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
61628
61629 wait_for_device_probe();
61630
61631- fd = sys_open("/dev/md0", 0, 0);
61632+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
61633 if (fd >= 0) {
61634 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
61635 sys_close(fd);
61636diff -urNp linux-2.6.32.46/init/initramfs.c linux-2.6.32.46/init/initramfs.c
61637--- linux-2.6.32.46/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
61638+++ linux-2.6.32.46/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
61639@@ -74,7 +74,7 @@ static void __init free_hash(void)
61640 }
61641 }
61642
61643-static long __init do_utime(char __user *filename, time_t mtime)
61644+static long __init do_utime(__force char __user *filename, time_t mtime)
61645 {
61646 struct timespec t[2];
61647
61648@@ -109,7 +109,7 @@ static void __init dir_utime(void)
61649 struct dir_entry *de, *tmp;
61650 list_for_each_entry_safe(de, tmp, &dir_list, list) {
61651 list_del(&de->list);
61652- do_utime(de->name, de->mtime);
61653+ do_utime((__force char __user *)de->name, de->mtime);
61654 kfree(de->name);
61655 kfree(de);
61656 }
61657@@ -271,7 +271,7 @@ static int __init maybe_link(void)
61658 if (nlink >= 2) {
61659 char *old = find_link(major, minor, ino, mode, collected);
61660 if (old)
61661- return (sys_link(old, collected) < 0) ? -1 : 1;
61662+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
61663 }
61664 return 0;
61665 }
61666@@ -280,11 +280,11 @@ static void __init clean_path(char *path
61667 {
61668 struct stat st;
61669
61670- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
61671+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
61672 if (S_ISDIR(st.st_mode))
61673- sys_rmdir(path);
61674+ sys_rmdir((__force char __user *)path);
61675 else
61676- sys_unlink(path);
61677+ sys_unlink((__force char __user *)path);
61678 }
61679 }
61680
61681@@ -305,7 +305,7 @@ static int __init do_name(void)
61682 int openflags = O_WRONLY|O_CREAT;
61683 if (ml != 1)
61684 openflags |= O_TRUNC;
61685- wfd = sys_open(collected, openflags, mode);
61686+ wfd = sys_open((__force char __user *)collected, openflags, mode);
61687
61688 if (wfd >= 0) {
61689 sys_fchown(wfd, uid, gid);
61690@@ -317,17 +317,17 @@ static int __init do_name(void)
61691 }
61692 }
61693 } else if (S_ISDIR(mode)) {
61694- sys_mkdir(collected, mode);
61695- sys_chown(collected, uid, gid);
61696- sys_chmod(collected, mode);
61697+ sys_mkdir((__force char __user *)collected, mode);
61698+ sys_chown((__force char __user *)collected, uid, gid);
61699+ sys_chmod((__force char __user *)collected, mode);
61700 dir_add(collected, mtime);
61701 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
61702 S_ISFIFO(mode) || S_ISSOCK(mode)) {
61703 if (maybe_link() == 0) {
61704- sys_mknod(collected, mode, rdev);
61705- sys_chown(collected, uid, gid);
61706- sys_chmod(collected, mode);
61707- do_utime(collected, mtime);
61708+ sys_mknod((__force char __user *)collected, mode, rdev);
61709+ sys_chown((__force char __user *)collected, uid, gid);
61710+ sys_chmod((__force char __user *)collected, mode);
61711+ do_utime((__force char __user *)collected, mtime);
61712 }
61713 }
61714 return 0;
61715@@ -336,15 +336,15 @@ static int __init do_name(void)
61716 static int __init do_copy(void)
61717 {
61718 if (count >= body_len) {
61719- sys_write(wfd, victim, body_len);
61720+ sys_write(wfd, (__force char __user *)victim, body_len);
61721 sys_close(wfd);
61722- do_utime(vcollected, mtime);
61723+ do_utime((__force char __user *)vcollected, mtime);
61724 kfree(vcollected);
61725 eat(body_len);
61726 state = SkipIt;
61727 return 0;
61728 } else {
61729- sys_write(wfd, victim, count);
61730+ sys_write(wfd, (__force char __user *)victim, count);
61731 body_len -= count;
61732 eat(count);
61733 return 1;
61734@@ -355,9 +355,9 @@ static int __init do_symlink(void)
61735 {
61736 collected[N_ALIGN(name_len) + body_len] = '\0';
61737 clean_path(collected, 0);
61738- sys_symlink(collected + N_ALIGN(name_len), collected);
61739- sys_lchown(collected, uid, gid);
61740- do_utime(collected, mtime);
61741+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
61742+ sys_lchown((__force char __user *)collected, uid, gid);
61743+ do_utime((__force char __user *)collected, mtime);
61744 state = SkipIt;
61745 next_state = Reset;
61746 return 0;
61747diff -urNp linux-2.6.32.46/init/Kconfig linux-2.6.32.46/init/Kconfig
61748--- linux-2.6.32.46/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
61749+++ linux-2.6.32.46/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
61750@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
61751
61752 config COMPAT_BRK
61753 bool "Disable heap randomization"
61754- default y
61755+ default n
61756 help
61757 Randomizing heap placement makes heap exploits harder, but it
61758 also breaks ancient binaries (including anything libc5 based).
61759diff -urNp linux-2.6.32.46/init/main.c linux-2.6.32.46/init/main.c
61760--- linux-2.6.32.46/init/main.c 2011-05-10 22:12:01.000000000 -0400
61761+++ linux-2.6.32.46/init/main.c 2011-08-05 20:33:55.000000000 -0400
61762@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
61763 #ifdef CONFIG_TC
61764 extern void tc_init(void);
61765 #endif
61766+extern void grsecurity_init(void);
61767
61768 enum system_states system_state __read_mostly;
61769 EXPORT_SYMBOL(system_state);
61770@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
61771
61772 __setup("reset_devices", set_reset_devices);
61773
61774+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
61775+extern char pax_enter_kernel_user[];
61776+extern char pax_exit_kernel_user[];
61777+extern pgdval_t clone_pgd_mask;
61778+#endif
61779+
61780+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
61781+static int __init setup_pax_nouderef(char *str)
61782+{
61783+#ifdef CONFIG_X86_32
61784+ unsigned int cpu;
61785+ struct desc_struct *gdt;
61786+
61787+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
61788+ gdt = get_cpu_gdt_table(cpu);
61789+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
61790+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
61791+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
61792+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
61793+ }
61794+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
61795+#else
61796+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
61797+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
61798+ clone_pgd_mask = ~(pgdval_t)0UL;
61799+#endif
61800+
61801+ return 0;
61802+}
61803+early_param("pax_nouderef", setup_pax_nouderef);
61804+#endif
61805+
61806+#ifdef CONFIG_PAX_SOFTMODE
61807+int pax_softmode;
61808+
61809+static int __init setup_pax_softmode(char *str)
61810+{
61811+ get_option(&str, &pax_softmode);
61812+ return 1;
61813+}
61814+__setup("pax_softmode=", setup_pax_softmode);
61815+#endif
61816+
61817 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
61818 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
61819 static const char *panic_later, *panic_param;
61820@@ -705,52 +749,53 @@ int initcall_debug;
61821 core_param(initcall_debug, initcall_debug, bool, 0644);
61822
61823 static char msgbuf[64];
61824-static struct boot_trace_call call;
61825-static struct boot_trace_ret ret;
61826+static struct boot_trace_call trace_call;
61827+static struct boot_trace_ret trace_ret;
61828
61829 int do_one_initcall(initcall_t fn)
61830 {
61831 int count = preempt_count();
61832 ktime_t calltime, delta, rettime;
61833+ const char *msg1 = "", *msg2 = "";
61834
61835 if (initcall_debug) {
61836- call.caller = task_pid_nr(current);
61837- printk("calling %pF @ %i\n", fn, call.caller);
61838+ trace_call.caller = task_pid_nr(current);
61839+ printk("calling %pF @ %i\n", fn, trace_call.caller);
61840 calltime = ktime_get();
61841- trace_boot_call(&call, fn);
61842+ trace_boot_call(&trace_call, fn);
61843 enable_boot_trace();
61844 }
61845
61846- ret.result = fn();
61847+ trace_ret.result = fn();
61848
61849 if (initcall_debug) {
61850 disable_boot_trace();
61851 rettime = ktime_get();
61852 delta = ktime_sub(rettime, calltime);
61853- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61854- trace_boot_ret(&ret, fn);
61855+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
61856+ trace_boot_ret(&trace_ret, fn);
61857 printk("initcall %pF returned %d after %Ld usecs\n", fn,
61858- ret.result, ret.duration);
61859+ trace_ret.result, trace_ret.duration);
61860 }
61861
61862 msgbuf[0] = 0;
61863
61864- if (ret.result && ret.result != -ENODEV && initcall_debug)
61865- sprintf(msgbuf, "error code %d ", ret.result);
61866+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
61867+ sprintf(msgbuf, "error code %d ", trace_ret.result);
61868
61869 if (preempt_count() != count) {
61870- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
61871+ msg1 = " preemption imbalance";
61872 preempt_count() = count;
61873 }
61874 if (irqs_disabled()) {
61875- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
61876+ msg2 = " disabled interrupts";
61877 local_irq_enable();
61878 }
61879- if (msgbuf[0]) {
61880- printk("initcall %pF returned with %s\n", fn, msgbuf);
61881+ if (msgbuf[0] || *msg1 || *msg2) {
61882+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
61883 }
61884
61885- return ret.result;
61886+ return trace_ret.result;
61887 }
61888
61889
61890@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
61891 if (!ramdisk_execute_command)
61892 ramdisk_execute_command = "/init";
61893
61894- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
61895+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
61896 ramdisk_execute_command = NULL;
61897 prepare_namespace();
61898 }
61899
61900+ grsecurity_init();
61901+
61902 /*
61903 * Ok, we have completed the initial bootup, and
61904 * we're essentially up and running. Get rid of the
61905diff -urNp linux-2.6.32.46/init/noinitramfs.c linux-2.6.32.46/init/noinitramfs.c
61906--- linux-2.6.32.46/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
61907+++ linux-2.6.32.46/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
61908@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
61909 {
61910 int err;
61911
61912- err = sys_mkdir("/dev", 0755);
61913+ err = sys_mkdir((const char __user *)"/dev", 0755);
61914 if (err < 0)
61915 goto out;
61916
61917@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
61918 if (err < 0)
61919 goto out;
61920
61921- err = sys_mkdir("/root", 0700);
61922+ err = sys_mkdir((const char __user *)"/root", 0700);
61923 if (err < 0)
61924 goto out;
61925
61926diff -urNp linux-2.6.32.46/ipc/mqueue.c linux-2.6.32.46/ipc/mqueue.c
61927--- linux-2.6.32.46/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
61928+++ linux-2.6.32.46/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
61929@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
61930 mq_bytes = (mq_msg_tblsz +
61931 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
61932
61933+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
61934 spin_lock(&mq_lock);
61935 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
61936 u->mq_bytes + mq_bytes >
61937diff -urNp linux-2.6.32.46/ipc/msg.c linux-2.6.32.46/ipc/msg.c
61938--- linux-2.6.32.46/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
61939+++ linux-2.6.32.46/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
61940@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
61941 return security_msg_queue_associate(msq, msgflg);
61942 }
61943
61944+static struct ipc_ops msg_ops = {
61945+ .getnew = newque,
61946+ .associate = msg_security,
61947+ .more_checks = NULL
61948+};
61949+
61950 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
61951 {
61952 struct ipc_namespace *ns;
61953- struct ipc_ops msg_ops;
61954 struct ipc_params msg_params;
61955
61956 ns = current->nsproxy->ipc_ns;
61957
61958- msg_ops.getnew = newque;
61959- msg_ops.associate = msg_security;
61960- msg_ops.more_checks = NULL;
61961-
61962 msg_params.key = key;
61963 msg_params.flg = msgflg;
61964
61965diff -urNp linux-2.6.32.46/ipc/sem.c linux-2.6.32.46/ipc/sem.c
61966--- linux-2.6.32.46/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
61967+++ linux-2.6.32.46/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
61968@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
61969 return 0;
61970 }
61971
61972+static struct ipc_ops sem_ops = {
61973+ .getnew = newary,
61974+ .associate = sem_security,
61975+ .more_checks = sem_more_checks
61976+};
61977+
61978 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
61979 {
61980 struct ipc_namespace *ns;
61981- struct ipc_ops sem_ops;
61982 struct ipc_params sem_params;
61983
61984 ns = current->nsproxy->ipc_ns;
61985@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
61986 if (nsems < 0 || nsems > ns->sc_semmsl)
61987 return -EINVAL;
61988
61989- sem_ops.getnew = newary;
61990- sem_ops.associate = sem_security;
61991- sem_ops.more_checks = sem_more_checks;
61992-
61993 sem_params.key = key;
61994 sem_params.flg = semflg;
61995 sem_params.u.nsems = nsems;
61996@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
61997 ushort* sem_io = fast_sem_io;
61998 int nsems;
61999
62000+ pax_track_stack();
62001+
62002 sma = sem_lock_check(ns, semid);
62003 if (IS_ERR(sma))
62004 return PTR_ERR(sma);
62005@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62006 unsigned long jiffies_left = 0;
62007 struct ipc_namespace *ns;
62008
62009+ pax_track_stack();
62010+
62011 ns = current->nsproxy->ipc_ns;
62012
62013 if (nsops < 1 || semid < 0)
62014diff -urNp linux-2.6.32.46/ipc/shm.c linux-2.6.32.46/ipc/shm.c
62015--- linux-2.6.32.46/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62016+++ linux-2.6.32.46/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62017@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62018 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62019 #endif
62020
62021+#ifdef CONFIG_GRKERNSEC
62022+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62023+ const time_t shm_createtime, const uid_t cuid,
62024+ const int shmid);
62025+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62026+ const time_t shm_createtime);
62027+#endif
62028+
62029 void shm_init_ns(struct ipc_namespace *ns)
62030 {
62031 ns->shm_ctlmax = SHMMAX;
62032@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62033 shp->shm_lprid = 0;
62034 shp->shm_atim = shp->shm_dtim = 0;
62035 shp->shm_ctim = get_seconds();
62036+#ifdef CONFIG_GRKERNSEC
62037+ {
62038+ struct timespec timeval;
62039+ do_posix_clock_monotonic_gettime(&timeval);
62040+
62041+ shp->shm_createtime = timeval.tv_sec;
62042+ }
62043+#endif
62044 shp->shm_segsz = size;
62045 shp->shm_nattch = 0;
62046 shp->shm_file = file;
62047@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62048 return 0;
62049 }
62050
62051+static struct ipc_ops shm_ops = {
62052+ .getnew = newseg,
62053+ .associate = shm_security,
62054+ .more_checks = shm_more_checks
62055+};
62056+
62057 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62058 {
62059 struct ipc_namespace *ns;
62060- struct ipc_ops shm_ops;
62061 struct ipc_params shm_params;
62062
62063 ns = current->nsproxy->ipc_ns;
62064
62065- shm_ops.getnew = newseg;
62066- shm_ops.associate = shm_security;
62067- shm_ops.more_checks = shm_more_checks;
62068-
62069 shm_params.key = key;
62070 shm_params.flg = shmflg;
62071 shm_params.u.size = size;
62072@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62073 if (err)
62074 goto out_unlock;
62075
62076+#ifdef CONFIG_GRKERNSEC
62077+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62078+ shp->shm_perm.cuid, shmid) ||
62079+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62080+ err = -EACCES;
62081+ goto out_unlock;
62082+ }
62083+#endif
62084+
62085 path.dentry = dget(shp->shm_file->f_path.dentry);
62086 path.mnt = shp->shm_file->f_path.mnt;
62087 shp->shm_nattch++;
62088+#ifdef CONFIG_GRKERNSEC
62089+ shp->shm_lapid = current->pid;
62090+#endif
62091 size = i_size_read(path.dentry->d_inode);
62092 shm_unlock(shp);
62093
62094diff -urNp linux-2.6.32.46/kernel/acct.c linux-2.6.32.46/kernel/acct.c
62095--- linux-2.6.32.46/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62096+++ linux-2.6.32.46/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62097@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62098 */
62099 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62100 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62101- file->f_op->write(file, (char *)&ac,
62102+ file->f_op->write(file, (__force char __user *)&ac,
62103 sizeof(acct_t), &file->f_pos);
62104 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62105 set_fs(fs);
62106diff -urNp linux-2.6.32.46/kernel/audit.c linux-2.6.32.46/kernel/audit.c
62107--- linux-2.6.32.46/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62108+++ linux-2.6.32.46/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62109@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62110 3) suppressed due to audit_rate_limit
62111 4) suppressed due to audit_backlog_limit
62112 */
62113-static atomic_t audit_lost = ATOMIC_INIT(0);
62114+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62115
62116 /* The netlink socket. */
62117 static struct sock *audit_sock;
62118@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62119 unsigned long now;
62120 int print;
62121
62122- atomic_inc(&audit_lost);
62123+ atomic_inc_unchecked(&audit_lost);
62124
62125 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62126
62127@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62128 printk(KERN_WARNING
62129 "audit: audit_lost=%d audit_rate_limit=%d "
62130 "audit_backlog_limit=%d\n",
62131- atomic_read(&audit_lost),
62132+ atomic_read_unchecked(&audit_lost),
62133 audit_rate_limit,
62134 audit_backlog_limit);
62135 audit_panic(message);
62136@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62137 status_set.pid = audit_pid;
62138 status_set.rate_limit = audit_rate_limit;
62139 status_set.backlog_limit = audit_backlog_limit;
62140- status_set.lost = atomic_read(&audit_lost);
62141+ status_set.lost = atomic_read_unchecked(&audit_lost);
62142 status_set.backlog = skb_queue_len(&audit_skb_queue);
62143 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62144 &status_set, sizeof(status_set));
62145@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62146 spin_unlock_irq(&tsk->sighand->siglock);
62147 }
62148 read_unlock(&tasklist_lock);
62149- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62150- &s, sizeof(s));
62151+
62152+ if (!err)
62153+ audit_send_reply(NETLINK_CB(skb).pid, seq,
62154+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62155 break;
62156 }
62157 case AUDIT_TTY_SET: {
62158diff -urNp linux-2.6.32.46/kernel/auditsc.c linux-2.6.32.46/kernel/auditsc.c
62159--- linux-2.6.32.46/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62160+++ linux-2.6.32.46/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62161@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62162 }
62163
62164 /* global counter which is incremented every time something logs in */
62165-static atomic_t session_id = ATOMIC_INIT(0);
62166+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62167
62168 /**
62169 * audit_set_loginuid - set a task's audit_context loginuid
62170@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62171 */
62172 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62173 {
62174- unsigned int sessionid = atomic_inc_return(&session_id);
62175+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62176 struct audit_context *context = task->audit_context;
62177
62178 if (context && context->in_syscall) {
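
Note on the audit hunks above: statistics counters such as `audit_lost` and `session_id` are converted from `atomic_t` to `atomic_unchecked_t`, the grsecurity/PaX variant that is exempt from the REFCOUNT overflow detection applied to ordinary `atomic_t`, which suits counters that may legitimately wrap. A rough userspace analogue using C11 atomics follows; the `*_counter_t` types and helpers are invented for illustration and are not kernel APIs.

#include <stdatomic.h>
#include <limits.h>
#include <stdio.h>

/* Counter that is allowed to wrap -- analogue of atomic_unchecked_t. */
typedef struct { atomic_int v; } unchecked_counter_t;

static void unchecked_inc(unchecked_counter_t *c)
{
        atomic_fetch_add_explicit(&c->v, 1, memory_order_relaxed);
}

/* Checked analogue: refuse to increment past INT_MAX, roughly what the
 * REFCOUNT hardening aims at for reference-count style atomic_t users. */
typedef struct { atomic_int v; } checked_counter_t;

static int checked_inc(checked_counter_t *c)
{
        int old = atomic_load_explicit(&c->v, memory_order_relaxed);

        do {
                if (old == INT_MAX)
                        return -1;      /* would overflow: saturate instead */
        } while (!atomic_compare_exchange_weak_explicit(&c->v, &old, old + 1,
                        memory_order_relaxed, memory_order_relaxed));
        return 0;
}

int main(void)
{
        unchecked_counter_t lost = { 0 };
        checked_counter_t   refs = { INT_MAX };

        unchecked_inc(&lost);
        printf("lost=%d checked_inc=%d\n",
               atomic_load(&lost.v), checked_inc(&refs));
        return 0;
}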
62179diff -urNp linux-2.6.32.46/kernel/capability.c linux-2.6.32.46/kernel/capability.c
62180--- linux-2.6.32.46/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62181+++ linux-2.6.32.46/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62182@@ -305,10 +305,26 @@ int capable(int cap)
62183 BUG();
62184 }
62185
62186- if (security_capable(cap) == 0) {
62187+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62188 current->flags |= PF_SUPERPRIV;
62189 return 1;
62190 }
62191 return 0;
62192 }
62193+
62194+int capable_nolog(int cap)
62195+{
62196+ if (unlikely(!cap_valid(cap))) {
62197+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62198+ BUG();
62199+ }
62200+
62201+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62202+ current->flags |= PF_SUPERPRIV;
62203+ return 1;
62204+ }
62205+ return 0;
62206+}
62207+
62208 EXPORT_SYMBOL(capable);
62209+EXPORT_SYMBOL(capable_nolog);
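
Note on the capability.c hunk above: it adds `capable_nolog()`, a sibling of `capable()` that consults `gr_is_capable_nolog()` so checks that are expected to fail routinely do not flood the grsecurity logs. A small userspace sketch of a logged/silent pair sharing one predicate is shown below; all `demo_*` names and the `policy_allows()` stand-in are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the combined security_capable()/gr_is_capable*() decision. */
static bool policy_allows(int cap) { return cap >= 0 && cap < 64; }

static bool cap_check(int cap, bool log)
{
        bool ok = policy_allows(cap);

        if (log)
                fprintf(stderr, "capability %d %s\n", cap,
                        ok ? "granted" : "denied");
        return ok;
}

/* Logged variant -- the normal capable() path. */
static bool demo_capable(int cap)       { return cap_check(cap, true); }

/* Silent variant -- for checks that may legitimately fail often. */
static bool demo_capable_nolog(int cap) { return cap_check(cap, false); }

int main(void)
{
        printf("%d %d\n", demo_capable(21), demo_capable_nolog(99));
        return 0;
}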
62210diff -urNp linux-2.6.32.46/kernel/cgroup.c linux-2.6.32.46/kernel/cgroup.c
62211--- linux-2.6.32.46/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62212+++ linux-2.6.32.46/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62213@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62214 struct hlist_head *hhead;
62215 struct cg_cgroup_link *link;
62216
62217+ pax_track_stack();
62218+
62219 /* First see if we already have a cgroup group that matches
62220 * the desired set */
62221 read_lock(&css_set_lock);
62222diff -urNp linux-2.6.32.46/kernel/configs.c linux-2.6.32.46/kernel/configs.c
62223--- linux-2.6.32.46/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62224+++ linux-2.6.32.46/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62225@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62226 struct proc_dir_entry *entry;
62227
62228 /* create the current config file */
62229+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62230+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62231+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62232+ &ikconfig_file_ops);
62233+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62234+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62235+ &ikconfig_file_ops);
62236+#endif
62237+#else
62238 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62239 &ikconfig_file_ops);
62240+#endif
62241+
62242 if (!entry)
62243 return -ENOMEM;
62244
62245diff -urNp linux-2.6.32.46/kernel/cpu.c linux-2.6.32.46/kernel/cpu.c
62246--- linux-2.6.32.46/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62247+++ linux-2.6.32.46/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62248@@ -19,7 +19,7 @@
62249 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62250 static DEFINE_MUTEX(cpu_add_remove_lock);
62251
62252-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62253+static RAW_NOTIFIER_HEAD(cpu_chain);
62254
62255 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62256 * Should always be manipulated under cpu_add_remove_lock
62257diff -urNp linux-2.6.32.46/kernel/cred.c linux-2.6.32.46/kernel/cred.c
62258--- linux-2.6.32.46/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62259+++ linux-2.6.32.46/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62260@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62261 */
62262 void __put_cred(struct cred *cred)
62263 {
62264+ pax_track_stack();
62265+
62266 kdebug("__put_cred(%p{%d,%d})", cred,
62267 atomic_read(&cred->usage),
62268 read_cred_subscribers(cred));
62269@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62270 {
62271 struct cred *cred;
62272
62273+ pax_track_stack();
62274+
62275 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62276 atomic_read(&tsk->cred->usage),
62277 read_cred_subscribers(tsk->cred));
62278@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62279 {
62280 const struct cred *cred;
62281
62282+ pax_track_stack();
62283+
62284 rcu_read_lock();
62285
62286 do {
62287@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62288 {
62289 struct cred *new;
62290
62291+ pax_track_stack();
62292+
62293 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62294 if (!new)
62295 return NULL;
62296@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62297 const struct cred *old;
62298 struct cred *new;
62299
62300+ pax_track_stack();
62301+
62302 validate_process_creds();
62303
62304 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62305@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62306 struct thread_group_cred *tgcred = NULL;
62307 struct cred *new;
62308
62309+ pax_track_stack();
62310+
62311 #ifdef CONFIG_KEYS
62312 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62313 if (!tgcred)
62314@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62315 struct cred *new;
62316 int ret;
62317
62318+ pax_track_stack();
62319+
62320 mutex_init(&p->cred_guard_mutex);
62321
62322 if (
62323@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62324 struct task_struct *task = current;
62325 const struct cred *old = task->real_cred;
62326
62327+ pax_track_stack();
62328+
62329 kdebug("commit_creds(%p{%d,%d})", new,
62330 atomic_read(&new->usage),
62331 read_cred_subscribers(new));
62332@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62333
62334 get_cred(new); /* we will require a ref for the subj creds too */
62335
62336+ gr_set_role_label(task, new->uid, new->gid);
62337+
62338 /* dumpability changes */
62339 if (old->euid != new->euid ||
62340 old->egid != new->egid ||
62341@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62342 key_fsgid_changed(task);
62343
62344 /* do it
62345- * - What if a process setreuid()'s and this brings the
62346- * new uid over his NPROC rlimit? We can check this now
62347- * cheaply with the new uid cache, so if it matters
62348- * we should be checking for it. -DaveM
62349+ * RLIMIT_NPROC limits on user->processes have already been checked
62350+ * in set_user().
62351 */
62352 alter_cred_subscribers(new, 2);
62353 if (new->user != old->user)
62354@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62355 */
62356 void abort_creds(struct cred *new)
62357 {
62358+ pax_track_stack();
62359+
62360 kdebug("abort_creds(%p{%d,%d})", new,
62361 atomic_read(&new->usage),
62362 read_cred_subscribers(new));
62363@@ -629,6 +647,8 @@ const struct cred *override_creds(const
62364 {
62365 const struct cred *old = current->cred;
62366
62367+ pax_track_stack();
62368+
62369 kdebug("override_creds(%p{%d,%d})", new,
62370 atomic_read(&new->usage),
62371 read_cred_subscribers(new));
62372@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62373 {
62374 const struct cred *override = current->cred;
62375
62376+ pax_track_stack();
62377+
62378 kdebug("revert_creds(%p{%d,%d})", old,
62379 atomic_read(&old->usage),
62380 read_cred_subscribers(old));
62381@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62382 const struct cred *old;
62383 struct cred *new;
62384
62385+ pax_track_stack();
62386+
62387 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62388 if (!new)
62389 return NULL;
62390@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62391 */
62392 int set_security_override(struct cred *new, u32 secid)
62393 {
62394+ pax_track_stack();
62395+
62396 return security_kernel_act_as(new, secid);
62397 }
62398 EXPORT_SYMBOL(set_security_override);
62399@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62400 u32 secid;
62401 int ret;
62402
62403+ pax_track_stack();
62404+
62405 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62406 if (ret < 0)
62407 return ret;
62408diff -urNp linux-2.6.32.46/kernel/exit.c linux-2.6.32.46/kernel/exit.c
62409--- linux-2.6.32.46/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62410+++ linux-2.6.32.46/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62411@@ -55,6 +55,10 @@
62412 #include <asm/pgtable.h>
62413 #include <asm/mmu_context.h>
62414
62415+#ifdef CONFIG_GRKERNSEC
62416+extern rwlock_t grsec_exec_file_lock;
62417+#endif
62418+
62419 static void exit_mm(struct task_struct * tsk);
62420
62421 static void __unhash_process(struct task_struct *p)
62422@@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62423 struct task_struct *leader;
62424 int zap_leader;
62425 repeat:
62426+#ifdef CONFIG_NET
62427+ gr_del_task_from_ip_table(p);
62428+#endif
62429+
62430 tracehook_prepare_release_task(p);
62431 /* don't need to get the RCU readlock here - the process is dead and
62432 * can't be modifying its own credentials */
62433@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62434 {
62435 write_lock_irq(&tasklist_lock);
62436
62437+#ifdef CONFIG_GRKERNSEC
62438+ write_lock(&grsec_exec_file_lock);
62439+ if (current->exec_file) {
62440+ fput(current->exec_file);
62441+ current->exec_file = NULL;
62442+ }
62443+ write_unlock(&grsec_exec_file_lock);
62444+#endif
62445+
62446 ptrace_unlink(current);
62447 /* Reparent to init */
62448 current->real_parent = current->parent = kthreadd_task;
62449 list_move_tail(&current->sibling, &current->real_parent->children);
62450
62451+ gr_set_kernel_label(current);
62452+
62453 /* Set the exit signal to SIGCHLD so we signal init on exit */
62454 current->exit_signal = SIGCHLD;
62455
62456@@ -397,7 +416,7 @@ int allow_signal(int sig)
62457 * know it'll be handled, so that they don't get converted to
62458 * SIGKILL or just silently dropped.
62459 */
62460- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62461+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62462 recalc_sigpending();
62463 spin_unlock_irq(&current->sighand->siglock);
62464 return 0;
62465@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62466 vsnprintf(current->comm, sizeof(current->comm), name, args);
62467 va_end(args);
62468
62469+#ifdef CONFIG_GRKERNSEC
62470+ write_lock(&grsec_exec_file_lock);
62471+ if (current->exec_file) {
62472+ fput(current->exec_file);
62473+ current->exec_file = NULL;
62474+ }
62475+ write_unlock(&grsec_exec_file_lock);
62476+#endif
62477+
62478+ gr_set_kernel_label(current);
62479+
62480 /*
62481 * If we were started as result of loading a module, close all of the
62482 * user space pages. We don't need them, and if we didn't close them
62483@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62484 struct task_struct *tsk = current;
62485 int group_dead;
62486
62487- profile_task_exit(tsk);
62488-
62489- WARN_ON(atomic_read(&tsk->fs_excl));
62490-
62491+ /*
62492+ * Check this first since set_fs() below depends on
62493+ * current_thread_info(), which we better not access when we're in
62494+ * interrupt context. Other than that, we want to do the set_fs()
62495+ * as early as possible.
62496+ */
62497 if (unlikely(in_interrupt()))
62498 panic("Aiee, killing interrupt handler!");
62499- if (unlikely(!tsk->pid))
62500- panic("Attempted to kill the idle task!");
62501
62502 /*
62503- * If do_exit is called because this processes oopsed, it's possible
62504+ * If do_exit is called because this processes Oops'ed, it's possible
62505 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62506 * continuing. Amongst other possible reasons, this is to prevent
62507 * mm_release()->clear_child_tid() from writing to a user-controlled
62508@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62509 */
62510 set_fs(USER_DS);
62511
62512+ profile_task_exit(tsk);
62513+
62514+ WARN_ON(atomic_read(&tsk->fs_excl));
62515+
62516+ if (unlikely(!tsk->pid))
62517+ panic("Attempted to kill the idle task!");
62518+
62519 tracehook_report_exit(&code);
62520
62521 validate_creds_for_do_exit(tsk);
62522@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62523 tsk->exit_code = code;
62524 taskstats_exit(tsk, group_dead);
62525
62526+ gr_acl_handle_psacct(tsk, code);
62527+ gr_acl_handle_exit();
62528+
62529 exit_mm(tsk);
62530
62531 if (group_dead)
62532@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62533
62534 if (unlikely(wo->wo_flags & WNOWAIT)) {
62535 int exit_code = p->exit_code;
62536- int why, status;
62537+ int why;
62538
62539 get_task_struct(p);
62540 read_unlock(&tasklist_lock);
62541diff -urNp linux-2.6.32.46/kernel/fork.c linux-2.6.32.46/kernel/fork.c
62542--- linux-2.6.32.46/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62543+++ linux-2.6.32.46/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62544@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62545 *stackend = STACK_END_MAGIC; /* for overflow detection */
62546
62547 #ifdef CONFIG_CC_STACKPROTECTOR
62548- tsk->stack_canary = get_random_int();
62549+ tsk->stack_canary = pax_get_random_long();
62550 #endif
62551
62552 /* One for us, one for whoever does the "release_task()" (usually parent) */
62553@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62554 mm->locked_vm = 0;
62555 mm->mmap = NULL;
62556 mm->mmap_cache = NULL;
62557- mm->free_area_cache = oldmm->mmap_base;
62558- mm->cached_hole_size = ~0UL;
62559+ mm->free_area_cache = oldmm->free_area_cache;
62560+ mm->cached_hole_size = oldmm->cached_hole_size;
62561 mm->map_count = 0;
62562 cpumask_clear(mm_cpumask(mm));
62563 mm->mm_rb = RB_ROOT;
62564@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
62565 tmp->vm_flags &= ~VM_LOCKED;
62566 tmp->vm_mm = mm;
62567 tmp->vm_next = tmp->vm_prev = NULL;
62568+ tmp->vm_mirror = NULL;
62569 anon_vma_link(tmp);
62570 file = tmp->vm_file;
62571 if (file) {
62572@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
62573 if (retval)
62574 goto out;
62575 }
62576+
62577+#ifdef CONFIG_PAX_SEGMEXEC
62578+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
62579+ struct vm_area_struct *mpnt_m;
62580+
62581+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
62582+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
62583+
62584+ if (!mpnt->vm_mirror)
62585+ continue;
62586+
62587+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
62588+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
62589+ mpnt->vm_mirror = mpnt_m;
62590+ } else {
62591+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
62592+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
62593+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
62594+ mpnt->vm_mirror->vm_mirror = mpnt;
62595+ }
62596+ }
62597+ BUG_ON(mpnt_m);
62598+ }
62599+#endif
62600+
62601 /* a new mm has just been created */
62602 arch_dup_mmap(oldmm, mm);
62603 retval = 0;
62604@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
62605 write_unlock(&fs->lock);
62606 return -EAGAIN;
62607 }
62608- fs->users++;
62609+ atomic_inc(&fs->users);
62610 write_unlock(&fs->lock);
62611 return 0;
62612 }
62613 tsk->fs = copy_fs_struct(fs);
62614 if (!tsk->fs)
62615 return -ENOMEM;
62616+ gr_set_chroot_entries(tsk, &tsk->fs->root);
62617 return 0;
62618 }
62619
62620@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
62621 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
62622 #endif
62623 retval = -EAGAIN;
62624+
62625+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
62626+
62627 if (atomic_read(&p->real_cred->user->processes) >=
62628 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
62629- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
62630- p->real_cred->user != INIT_USER)
62631+ if (p->real_cred->user != INIT_USER &&
62632+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
62633 goto bad_fork_free;
62634 }
62635+ current->flags &= ~PF_NPROC_EXCEEDED;
62636
62637 retval = copy_creds(p, clone_flags);
62638 if (retval < 0)
62639@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
62640 goto bad_fork_free_pid;
62641 }
62642
62643+ gr_copy_label(p);
62644+
62645 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
62646 /*
62647 * Clear TID on mm_release()?
62648@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
62649 bad_fork_free:
62650 free_task(p);
62651 fork_out:
62652+ gr_log_forkfail(retval);
62653+
62654 return ERR_PTR(retval);
62655 }
62656
62657@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
62658 if (clone_flags & CLONE_PARENT_SETTID)
62659 put_user(nr, parent_tidptr);
62660
62661+ gr_handle_brute_check();
62662+
62663 if (clone_flags & CLONE_VFORK) {
62664 p->vfork_done = &vfork;
62665 init_completion(&vfork);
62666@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
62667 return 0;
62668
62669 /* don't need lock here; in the worst case we'll do useless copy */
62670- if (fs->users == 1)
62671+ if (atomic_read(&fs->users) == 1)
62672 return 0;
62673
62674 *new_fsp = copy_fs_struct(fs);
62675@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
62676 fs = current->fs;
62677 write_lock(&fs->lock);
62678 current->fs = new_fs;
62679- if (--fs->users)
62680+ gr_set_chroot_entries(current, &current->fs->root);
62681+ if (atomic_dec_return(&fs->users))
62682 new_fs = NULL;
62683 else
62684 new_fs = fs;
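
Note on the fork.c hunks above: `fs->users` changes from a plain integer manipulated under `fs->lock` to an `atomic_t` driven by `atomic_inc()`, `atomic_read()` and `atomic_dec_return()`. The sketch below shows the same reference-count shape with C11 atomics in userspace; `struct demo_fs` and the `demo_fs_*` helpers are illustrative names, not the kernel's `fs_struct` API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified fs_struct analogue with an atomic user count. */
struct demo_fs {
        atomic_int users;
};

static struct demo_fs *demo_fs_alloc(void)
{
        struct demo_fs *fs = calloc(1, sizeof(*fs));

        if (fs)
                atomic_store(&fs->users, 1);    /* creator holds one ref */
        return fs;
}

static void demo_fs_get(struct demo_fs *fs)
{
        atomic_fetch_add(&fs->users, 1);        /* was: fs->users++ */
}

static void demo_fs_put(struct demo_fs *fs)
{
        /* was: if (--fs->users) ...  -- atomic_dec_return() analogue */
        if (atomic_fetch_sub(&fs->users, 1) == 1)
                free(fs);                       /* last user frees it */
}

int main(void)
{
        struct demo_fs *fs = demo_fs_alloc();

        if (!fs)
                return 1;
        demo_fs_get(fs);
        demo_fs_put(fs);
        printf("users now %d\n", atomic_load(&fs->users));
        demo_fs_put(fs);                        /* drops to 0, frees */
        return 0;
}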
62685diff -urNp linux-2.6.32.46/kernel/futex.c linux-2.6.32.46/kernel/futex.c
62686--- linux-2.6.32.46/kernel/futex.c 2011-08-29 22:24:44.000000000 -0400
62687+++ linux-2.6.32.46/kernel/futex.c 2011-08-29 22:25:07.000000000 -0400
62688@@ -54,6 +54,7 @@
62689 #include <linux/mount.h>
62690 #include <linux/pagemap.h>
62691 #include <linux/syscalls.h>
62692+#include <linux/ptrace.h>
62693 #include <linux/signal.h>
62694 #include <linux/module.h>
62695 #include <linux/magic.h>
62696@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fsh
62697 struct page *page;
62698 int err, ro = 0;
62699
62700+#ifdef CONFIG_PAX_SEGMEXEC
62701+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
62702+ return -EFAULT;
62703+#endif
62704+
62705 /*
62706 * The futex address must be "naturally" aligned.
62707 */
62708@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr,
62709 struct futex_q q;
62710 int ret;
62711
62712+ pax_track_stack();
62713+
62714 if (!bitset)
62715 return -EINVAL;
62716
62717@@ -1871,7 +1879,7 @@ retry:
62718
62719 restart = &current_thread_info()->restart_block;
62720 restart->fn = futex_wait_restart;
62721- restart->futex.uaddr = (u32 *)uaddr;
62722+ restart->futex.uaddr = uaddr;
62723 restart->futex.val = val;
62724 restart->futex.time = abs_time->tv64;
62725 restart->futex.bitset = bitset;
62726@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __u
62727 struct futex_q q;
62728 int res, ret;
62729
62730+ pax_track_stack();
62731+
62732 if (!bitset)
62733 return -EINVAL;
62734
62735@@ -2407,7 +2417,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62736 {
62737 struct robust_list_head __user *head;
62738 unsigned long ret;
62739+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62740 const struct cred *cred = current_cred(), *pcred;
62741+#endif
62742
62743 if (!futex_cmpxchg_enabled)
62744 return -ENOSYS;
62745@@ -2423,11 +2435,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
62746 if (!p)
62747 goto err_unlock;
62748 ret = -EPERM;
62749+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62750+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62751+ goto err_unlock;
62752+#else
62753 pcred = __task_cred(p);
62754 if (cred->euid != pcred->euid &&
62755 cred->euid != pcred->uid &&
62756 !capable(CAP_SYS_PTRACE))
62757 goto err_unlock;
62758+#endif
62759 head = p->robust_list;
62760 rcu_read_unlock();
62761 }
62762@@ -2489,7 +2506,7 @@ retry:
62763 */
62764 static inline int fetch_robust_entry(struct robust_list __user **entry,
62765 struct robust_list __user * __user *head,
62766- int *pi)
62767+ unsigned int *pi)
62768 {
62769 unsigned long uentry;
62770
62771@@ -2670,6 +2687,7 @@ static int __init futex_init(void)
62772 {
62773 u32 curval;
62774 int i;
62775+ mm_segment_t oldfs;
62776
62777 /*
62778 * This will fail and we want it. Some arch implementations do
62779@@ -2681,7 +2699,10 @@ static int __init futex_init(void)
62780 * implementation, the non functional ones will return
62781 * -ENOSYS.
62782 */
62783+ oldfs = get_fs();
62784+ set_fs(USER_DS);
62785 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
62786+ set_fs(oldfs);
62787 if (curval == -EFAULT)
62788 futex_cmpxchg_enabled = 1;
62789
62790diff -urNp linux-2.6.32.46/kernel/futex_compat.c linux-2.6.32.46/kernel/futex_compat.c
62791--- linux-2.6.32.46/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
62792+++ linux-2.6.32.46/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
62793@@ -10,6 +10,7 @@
62794 #include <linux/compat.h>
62795 #include <linux/nsproxy.h>
62796 #include <linux/futex.h>
62797+#include <linux/ptrace.h>
62798
62799 #include <asm/uaccess.h>
62800
62801@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
62802 {
62803 struct compat_robust_list_head __user *head;
62804 unsigned long ret;
62805- const struct cred *cred = current_cred(), *pcred;
62806+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
62807+ const struct cred *cred = current_cred();
62808+ const struct cred *pcred;
62809+#endif
62810
62811 if (!futex_cmpxchg_enabled)
62812 return -ENOSYS;
62813@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
62814 if (!p)
62815 goto err_unlock;
62816 ret = -EPERM;
62817+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62818+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
62819+ goto err_unlock;
62820+#else
62821 pcred = __task_cred(p);
62822 if (cred->euid != pcred->euid &&
62823 cred->euid != pcred->uid &&
62824 !capable(CAP_SYS_PTRACE))
62825 goto err_unlock;
62826+#endif
62827 head = p->compat_robust_list;
62828 read_unlock(&tasklist_lock);
62829 }
62830diff -urNp linux-2.6.32.46/kernel/gcov/base.c linux-2.6.32.46/kernel/gcov/base.c
62831--- linux-2.6.32.46/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
62832+++ linux-2.6.32.46/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
62833@@ -102,11 +102,6 @@ void gcov_enable_events(void)
62834 }
62835
62836 #ifdef CONFIG_MODULES
62837-static inline int within(void *addr, void *start, unsigned long size)
62838-{
62839- return ((addr >= start) && (addr < start + size));
62840-}
62841-
62842 /* Update list and generate events when modules are unloaded. */
62843 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
62844 void *data)
62845@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
62846 prev = NULL;
62847 /* Remove entries located in module from linked list. */
62848 for (info = gcov_info_head; info; info = info->next) {
62849- if (within(info, mod->module_core, mod->core_size)) {
62850+ if (within_module_core_rw((unsigned long)info, mod)) {
62851 if (prev)
62852 prev->next = info->next;
62853 else
62854diff -urNp linux-2.6.32.46/kernel/hrtimer.c linux-2.6.32.46/kernel/hrtimer.c
62855--- linux-2.6.32.46/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
62856+++ linux-2.6.32.46/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
62857@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
62858 local_irq_restore(flags);
62859 }
62860
62861-static void run_hrtimer_softirq(struct softirq_action *h)
62862+static void run_hrtimer_softirq(void)
62863 {
62864 hrtimer_peek_ahead_timers();
62865 }
62866diff -urNp linux-2.6.32.46/kernel/kallsyms.c linux-2.6.32.46/kernel/kallsyms.c
62867--- linux-2.6.32.46/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
62868+++ linux-2.6.32.46/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
62869@@ -11,6 +11,9 @@
62870 * Changed the compression method from stem compression to "table lookup"
62871 * compression (see scripts/kallsyms.c for a more complete description)
62872 */
62873+#ifdef CONFIG_GRKERNSEC_HIDESYM
62874+#define __INCLUDED_BY_HIDESYM 1
62875+#endif
62876 #include <linux/kallsyms.h>
62877 #include <linux/module.h>
62878 #include <linux/init.h>
62879@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
62880
62881 static inline int is_kernel_inittext(unsigned long addr)
62882 {
62883+ if (system_state != SYSTEM_BOOTING)
62884+ return 0;
62885+
62886 if (addr >= (unsigned long)_sinittext
62887 && addr <= (unsigned long)_einittext)
62888 return 1;
62889 return 0;
62890 }
62891
62892+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62893+#ifdef CONFIG_MODULES
62894+static inline int is_module_text(unsigned long addr)
62895+{
62896+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
62897+ return 1;
62898+
62899+ addr = ktla_ktva(addr);
62900+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
62901+}
62902+#else
62903+static inline int is_module_text(unsigned long addr)
62904+{
62905+ return 0;
62906+}
62907+#endif
62908+#endif
62909+
62910 static inline int is_kernel_text(unsigned long addr)
62911 {
62912 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
62913@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
62914
62915 static inline int is_kernel(unsigned long addr)
62916 {
62917+
62918+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62919+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
62920+ return 1;
62921+
62922+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
62923+#else
62924 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
62925+#endif
62926+
62927 return 1;
62928 return in_gate_area_no_task(addr);
62929 }
62930
62931 static int is_ksym_addr(unsigned long addr)
62932 {
62933+
62934+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62935+ if (is_module_text(addr))
62936+ return 0;
62937+#endif
62938+
62939 if (all_var)
62940 return is_kernel(addr);
62941
62942@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
62943
62944 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
62945 {
62946- iter->name[0] = '\0';
62947 iter->nameoff = get_symbol_offset(new_pos);
62948 iter->pos = new_pos;
62949 }
62950@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
62951 {
62952 struct kallsym_iter *iter = m->private;
62953
62954+#ifdef CONFIG_GRKERNSEC_HIDESYM
62955+ if (current_uid())
62956+ return 0;
62957+#endif
62958+
62959 /* Some debugging symbols have no name. Ignore them. */
62960 if (!iter->name[0])
62961 return 0;
62962@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
62963 struct kallsym_iter *iter;
62964 int ret;
62965
62966- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
62967+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
62968 if (!iter)
62969 return -ENOMEM;
62970 reset_iter(iter, 0);
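
Note on the kallsyms.c hunks above: `reset_iter()` no longer clears `iter->name[0]`, which is safe because the allocation switches from `kmalloc()` to `kzalloc()`, so the buffer starts out zeroed and the "nameless symbol" test in `s_show()` never reads uninitialized memory. A minimal userspace sketch of that trade-off using `calloc()` follows; the `demo_*` names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_iter {
        char name[128];
        long pos;
};

/* calloc() (the kzalloc() analogue) zeroes the buffer up front. */
static struct demo_iter *demo_iter_open(void)
{
        return calloc(1, sizeof(struct demo_iter));
}

static void demo_reset_iter(struct demo_iter *iter, long pos)
{
        /* no need to re-clear name[0]; the allocation was zeroed */
        iter->pos = pos;
}

static int demo_show(const struct demo_iter *iter)
{
        if (!iter->name[0])     /* nameless entry: ignore */
                return 0;
        return printf("%s\n", iter->name);
}

int main(void)
{
        struct demo_iter *iter = demo_iter_open();

        if (!iter)
                return 1;
        demo_reset_iter(iter, 0);
        demo_show(iter);                        /* prints nothing */
        strcpy(iter->name, "vsnprintf");
        demo_show(iter);                        /* prints the symbol */
        free(iter);
        return 0;
}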
62971diff -urNp linux-2.6.32.46/kernel/kgdb.c linux-2.6.32.46/kernel/kgdb.c
62972--- linux-2.6.32.46/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
62973+++ linux-2.6.32.46/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
62974@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
62975 /* Guard for recursive entry */
62976 static int exception_level;
62977
62978-static struct kgdb_io *kgdb_io_ops;
62979+static const struct kgdb_io *kgdb_io_ops;
62980 static DEFINE_SPINLOCK(kgdb_registration_lock);
62981
62982 /* kgdb console driver is loaded */
62983@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
62984 */
62985 static atomic_t passive_cpu_wait[NR_CPUS];
62986 static atomic_t cpu_in_kgdb[NR_CPUS];
62987-atomic_t kgdb_setting_breakpoint;
62988+atomic_unchecked_t kgdb_setting_breakpoint;
62989
62990 struct task_struct *kgdb_usethread;
62991 struct task_struct *kgdb_contthread;
62992@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
62993 sizeof(unsigned long)];
62994
62995 /* to keep track of the CPU which is doing the single stepping*/
62996-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62997+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
62998
62999 /*
63000 * If you are debugging a problem where roundup (the collection of
63001@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63002 return 0;
63003 if (kgdb_connected)
63004 return 1;
63005- if (atomic_read(&kgdb_setting_breakpoint))
63006+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63007 return 1;
63008 if (print_wait)
63009 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63010@@ -1426,8 +1426,8 @@ acquirelock:
63011 * instance of the exception handler wanted to come into the
63012 * debugger on a different CPU via a single step
63013 */
63014- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63015- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63016+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63017+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63018
63019 atomic_set(&kgdb_active, -1);
63020 touch_softlockup_watchdog();
63021@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63022 *
63023 * Register it with the KGDB core.
63024 */
63025-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63026+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63027 {
63028 int err;
63029
63030@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63031 *
63032 * Unregister it with the KGDB core.
63033 */
63034-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63035+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63036 {
63037 BUG_ON(kgdb_connected);
63038
63039@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63040 */
63041 void kgdb_breakpoint(void)
63042 {
63043- atomic_set(&kgdb_setting_breakpoint, 1);
63044+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63045 wmb(); /* Sync point before breakpoint */
63046 arch_kgdb_breakpoint();
63047 wmb(); /* Sync point after breakpoint */
63048- atomic_set(&kgdb_setting_breakpoint, 0);
63049+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63050 }
63051 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63052
63053diff -urNp linux-2.6.32.46/kernel/kmod.c linux-2.6.32.46/kernel/kmod.c
63054--- linux-2.6.32.46/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63055+++ linux-2.6.32.46/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63056@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63057 * If module auto-loading support is disabled then this function
63058 * becomes a no-operation.
63059 */
63060-int __request_module(bool wait, const char *fmt, ...)
63061+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63062 {
63063- va_list args;
63064 char module_name[MODULE_NAME_LEN];
63065 unsigned int max_modprobes;
63066 int ret;
63067- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63068+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63069 static char *envp[] = { "HOME=/",
63070 "TERM=linux",
63071 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63072@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63073 if (ret)
63074 return ret;
63075
63076- va_start(args, fmt);
63077- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63078- va_end(args);
63079+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63080 if (ret >= MODULE_NAME_LEN)
63081 return -ENAMETOOLONG;
63082
63083+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63084+ if (!current_uid()) {
63085+ /* hack to workaround consolekit/udisks stupidity */
63086+ read_lock(&tasklist_lock);
63087+ if (!strcmp(current->comm, "mount") &&
63088+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63089+ read_unlock(&tasklist_lock);
63090+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63091+ return -EPERM;
63092+ }
63093+ read_unlock(&tasklist_lock);
63094+ }
63095+#endif
63096+
63097 /* If modprobe needs a service that is in a module, we get a recursive
63098 * loop. Limit the number of running kmod threads to max_threads/2 or
63099 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63100@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63101 atomic_dec(&kmod_concurrent);
63102 return ret;
63103 }
63104+
63105+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63106+{
63107+ va_list args;
63108+ int ret;
63109+
63110+ va_start(args, fmt);
63111+ ret = ____request_module(wait, module_param, fmt, args);
63112+ va_end(args);
63113+
63114+ return ret;
63115+}
63116+
63117+int __request_module(bool wait, const char *fmt, ...)
63118+{
63119+ va_list args;
63120+ int ret;
63121+
63122+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63123+ if (current_uid()) {
63124+ char module_param[MODULE_NAME_LEN];
63125+
63126+ memset(module_param, 0, sizeof(module_param));
63127+
63128+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63129+
63130+ va_start(args, fmt);
63131+ ret = ____request_module(wait, module_param, fmt, args);
63132+ va_end(args);
63133+
63134+ return ret;
63135+ }
63136+#endif
63137+
63138+ va_start(args, fmt);
63139+ ret = ____request_module(wait, NULL, fmt, args);
63140+ va_end(args);
63141+
63142+ return ret;
63143+}
63144+
63145+
63146 EXPORT_SYMBOL(__request_module);
63147 #endif /* CONFIG_MODULES */
63148
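
Note on the kmod.c hunk above: `__request_module()` is split into a `va_list`-taking core (`____request_module()`) plus thin variadic wrappers, which lets the MODHARDEN path inject an extra modprobe argument. The sketch below shows the same variadic-core refactor in userspace; the `demo_*` names, buffer size and printed command line are illustrative assumptions, not the kernel's interface.

#include <stdarg.h>
#include <stdio.h>

/* va_list core, analogous to ____request_module(): formats the module
 * name and takes an optional extra token supplied by a wrapper.       */
static int vdemo_request(const char *extra_arg, const char *fmt, va_list ap)
{
        char name[64];

        vsnprintf(name, sizeof(name), fmt, ap);
        return printf("modprobe -q -- %s %s\n", name,
                      extra_arg ? extra_arg : "");
}

/* Plain wrapper, like __request_module(). */
static int demo_request(const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vdemo_request(NULL, fmt, ap);
        va_end(ap);
        return ret;
}

/* Wrapper that injects an extra token, like the ___request_module()
 * variant used to tag who triggered the auto-load.                   */
static int demo_request_tagged(const char *tag, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vdemo_request(tag, fmt, ap);
        va_end(ap);
        return ret;
}

int main(void)
{
        demo_request("fs-%s", "ext4");
        demo_request_tagged("grsec_modharden_normal1000_", "net-pf-%d", 10);
        return 0;
}

Keeping a single `va_list` core means the name formatting, length check and spawning logic exist in one place, while each wrapper only decides which extra argument, if any, to pass down.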
63149diff -urNp linux-2.6.32.46/kernel/kprobes.c linux-2.6.32.46/kernel/kprobes.c
63150--- linux-2.6.32.46/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63151+++ linux-2.6.32.46/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63152@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63153 * kernel image and loaded module images reside. This is required
63154 * so x86_64 can correctly handle the %rip-relative fixups.
63155 */
63156- kip->insns = module_alloc(PAGE_SIZE);
63157+ kip->insns = module_alloc_exec(PAGE_SIZE);
63158 if (!kip->insns) {
63159 kfree(kip);
63160 return NULL;
63161@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63162 */
63163 if (!list_is_singular(&kprobe_insn_pages)) {
63164 list_del(&kip->list);
63165- module_free(NULL, kip->insns);
63166+ module_free_exec(NULL, kip->insns);
63167 kfree(kip);
63168 }
63169 return 1;
63170@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63171 {
63172 int i, err = 0;
63173 unsigned long offset = 0, size = 0;
63174- char *modname, namebuf[128];
63175+ char *modname, namebuf[KSYM_NAME_LEN];
63176 const char *symbol_name;
63177 void *addr;
63178 struct kprobe_blackpoint *kb;
63179@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63180 const char *sym = NULL;
63181 unsigned int i = *(loff_t *) v;
63182 unsigned long offset = 0;
63183- char *modname, namebuf[128];
63184+ char *modname, namebuf[KSYM_NAME_LEN];
63185
63186 head = &kprobe_table[i];
63187 preempt_disable();
63188diff -urNp linux-2.6.32.46/kernel/lockdep.c linux-2.6.32.46/kernel/lockdep.c
63189--- linux-2.6.32.46/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63190+++ linux-2.6.32.46/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63191@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63192 /*
63193 * Various lockdep statistics:
63194 */
63195-atomic_t chain_lookup_hits;
63196-atomic_t chain_lookup_misses;
63197-atomic_t hardirqs_on_events;
63198-atomic_t hardirqs_off_events;
63199-atomic_t redundant_hardirqs_on;
63200-atomic_t redundant_hardirqs_off;
63201-atomic_t softirqs_on_events;
63202-atomic_t softirqs_off_events;
63203-atomic_t redundant_softirqs_on;
63204-atomic_t redundant_softirqs_off;
63205-atomic_t nr_unused_locks;
63206-atomic_t nr_cyclic_checks;
63207-atomic_t nr_find_usage_forwards_checks;
63208-atomic_t nr_find_usage_backwards_checks;
63209+atomic_unchecked_t chain_lookup_hits;
63210+atomic_unchecked_t chain_lookup_misses;
63211+atomic_unchecked_t hardirqs_on_events;
63212+atomic_unchecked_t hardirqs_off_events;
63213+atomic_unchecked_t redundant_hardirqs_on;
63214+atomic_unchecked_t redundant_hardirqs_off;
63215+atomic_unchecked_t softirqs_on_events;
63216+atomic_unchecked_t softirqs_off_events;
63217+atomic_unchecked_t redundant_softirqs_on;
63218+atomic_unchecked_t redundant_softirqs_off;
63219+atomic_unchecked_t nr_unused_locks;
63220+atomic_unchecked_t nr_cyclic_checks;
63221+atomic_unchecked_t nr_find_usage_forwards_checks;
63222+atomic_unchecked_t nr_find_usage_backwards_checks;
63223 #endif
63224
63225 /*
63226@@ -577,6 +577,10 @@ static int static_obj(void *obj)
63227 int i;
63228 #endif
63229
63230+#ifdef CONFIG_PAX_KERNEXEC
63231+ start = ktla_ktva(start);
63232+#endif
63233+
63234 /*
63235 * static variable?
63236 */
63237@@ -592,8 +596,7 @@ static int static_obj(void *obj)
63238 */
63239 for_each_possible_cpu(i) {
63240 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63241- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63242- + per_cpu_offset(i);
63243+ end = start + PERCPU_ENOUGH_ROOM;
63244
63245 if ((addr >= start) && (addr < end))
63246 return 1;
63247@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63248 if (!static_obj(lock->key)) {
63249 debug_locks_off();
63250 printk("INFO: trying to register non-static key.\n");
63251+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63252 printk("the code is fine but needs lockdep annotation.\n");
63253 printk("turning off the locking correctness validator.\n");
63254 dump_stack();
63255@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63256 if (!class)
63257 return 0;
63258 }
63259- debug_atomic_inc((atomic_t *)&class->ops);
63260+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63261 if (very_verbose(class)) {
63262 printk("\nacquire class [%p] %s", class->key, class->name);
63263 if (class->name_version > 1)
63264diff -urNp linux-2.6.32.46/kernel/lockdep_internals.h linux-2.6.32.46/kernel/lockdep_internals.h
63265--- linux-2.6.32.46/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63266+++ linux-2.6.32.46/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63267@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63268 /*
63269 * Various lockdep statistics:
63270 */
63271-extern atomic_t chain_lookup_hits;
63272-extern atomic_t chain_lookup_misses;
63273-extern atomic_t hardirqs_on_events;
63274-extern atomic_t hardirqs_off_events;
63275-extern atomic_t redundant_hardirqs_on;
63276-extern atomic_t redundant_hardirqs_off;
63277-extern atomic_t softirqs_on_events;
63278-extern atomic_t softirqs_off_events;
63279-extern atomic_t redundant_softirqs_on;
63280-extern atomic_t redundant_softirqs_off;
63281-extern atomic_t nr_unused_locks;
63282-extern atomic_t nr_cyclic_checks;
63283-extern atomic_t nr_cyclic_check_recursions;
63284-extern atomic_t nr_find_usage_forwards_checks;
63285-extern atomic_t nr_find_usage_forwards_recursions;
63286-extern atomic_t nr_find_usage_backwards_checks;
63287-extern atomic_t nr_find_usage_backwards_recursions;
63288-# define debug_atomic_inc(ptr) atomic_inc(ptr)
63289-# define debug_atomic_dec(ptr) atomic_dec(ptr)
63290-# define debug_atomic_read(ptr) atomic_read(ptr)
63291+extern atomic_unchecked_t chain_lookup_hits;
63292+extern atomic_unchecked_t chain_lookup_misses;
63293+extern atomic_unchecked_t hardirqs_on_events;
63294+extern atomic_unchecked_t hardirqs_off_events;
63295+extern atomic_unchecked_t redundant_hardirqs_on;
63296+extern atomic_unchecked_t redundant_hardirqs_off;
63297+extern atomic_unchecked_t softirqs_on_events;
63298+extern atomic_unchecked_t softirqs_off_events;
63299+extern atomic_unchecked_t redundant_softirqs_on;
63300+extern atomic_unchecked_t redundant_softirqs_off;
63301+extern atomic_unchecked_t nr_unused_locks;
63302+extern atomic_unchecked_t nr_cyclic_checks;
63303+extern atomic_unchecked_t nr_cyclic_check_recursions;
63304+extern atomic_unchecked_t nr_find_usage_forwards_checks;
63305+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63306+extern atomic_unchecked_t nr_find_usage_backwards_checks;
63307+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63308+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63309+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63310+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63311 #else
63312 # define debug_atomic_inc(ptr) do { } while (0)
63313 # define debug_atomic_dec(ptr) do { } while (0)
63314diff -urNp linux-2.6.32.46/kernel/lockdep_proc.c linux-2.6.32.46/kernel/lockdep_proc.c
63315--- linux-2.6.32.46/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63316+++ linux-2.6.32.46/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63317@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63318
63319 static void print_name(struct seq_file *m, struct lock_class *class)
63320 {
63321- char str[128];
63322+ char str[KSYM_NAME_LEN];
63323 const char *name = class->name;
63324
63325 if (!name) {
63326diff -urNp linux-2.6.32.46/kernel/module.c linux-2.6.32.46/kernel/module.c
63327--- linux-2.6.32.46/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63328+++ linux-2.6.32.46/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63329@@ -55,6 +55,7 @@
63330 #include <linux/async.h>
63331 #include <linux/percpu.h>
63332 #include <linux/kmemleak.h>
63333+#include <linux/grsecurity.h>
63334
63335 #define CREATE_TRACE_POINTS
63336 #include <trace/events/module.h>
63337@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63338 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63339
63340 /* Bounds of module allocation, for speeding __module_address */
63341-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63342+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63343+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63344
63345 int register_module_notifier(struct notifier_block * nb)
63346 {
63347@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63348 return true;
63349
63350 list_for_each_entry_rcu(mod, &modules, list) {
63351- struct symsearch arr[] = {
63352+ struct symsearch modarr[] = {
63353 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63354 NOT_GPL_ONLY, false },
63355 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63356@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63357 #endif
63358 };
63359
63360- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63361+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63362 return true;
63363 }
63364 return false;
63365@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63366 void *ptr;
63367 int cpu;
63368
63369- if (align > PAGE_SIZE) {
63370+ if (align-1 >= PAGE_SIZE) {
63371 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63372 name, align, PAGE_SIZE);
63373 align = PAGE_SIZE;
63374@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63375 * /sys/module/foo/sections stuff
63376 * J. Corbet <corbet@lwn.net>
63377 */
63378-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63379+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63380
63381 static inline bool sect_empty(const Elf_Shdr *sect)
63382 {
63383@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63384 destroy_params(mod->kp, mod->num_kp);
63385
63386 /* This may be NULL, but that's OK */
63387- module_free(mod, mod->module_init);
63388+ module_free(mod, mod->module_init_rw);
63389+ module_free_exec(mod, mod->module_init_rx);
63390 kfree(mod->args);
63391 if (mod->percpu)
63392 percpu_modfree(mod->percpu);
63393@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63394 percpu_modfree(mod->refptr);
63395 #endif
63396 /* Free lock-classes: */
63397- lockdep_free_key_range(mod->module_core, mod->core_size);
63398+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63399+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63400
63401 /* Finally, free the core (containing the module structure) */
63402- module_free(mod, mod->module_core);
63403+ module_free_exec(mod, mod->module_core_rx);
63404+ module_free(mod, mod->module_core_rw);
63405
63406 #ifdef CONFIG_MPU
63407 update_protections(current->mm);
63408@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63409 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63410 int ret = 0;
63411 const struct kernel_symbol *ksym;
63412+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63413+ int is_fs_load = 0;
63414+ int register_filesystem_found = 0;
63415+ char *p;
63416+
63417+ p = strstr(mod->args, "grsec_modharden_fs");
63418+
63419+ if (p) {
63420+ char *endptr = p + strlen("grsec_modharden_fs");
63421+ /* copy \0 as well */
63422+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63423+ is_fs_load = 1;
63424+ }
63425+#endif
63426+
63427
63428 for (i = 1; i < n; i++) {
63429+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63430+ const char *name = strtab + sym[i].st_name;
63431+
63432+ /* it's a real shame this will never get ripped and copied
63433+ upstream! ;(
63434+ */
63435+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63436+ register_filesystem_found = 1;
63437+#endif
63438 switch (sym[i].st_shndx) {
63439 case SHN_COMMON:
63440 /* We compiled with -fno-common. These are not
63441@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63442 strtab + sym[i].st_name, mod);
63443 /* Ok if resolved. */
63444 if (ksym) {
63445+ pax_open_kernel();
63446 sym[i].st_value = ksym->value;
63447+ pax_close_kernel();
63448 break;
63449 }
63450
63451@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63452 secbase = (unsigned long)mod->percpu;
63453 else
63454 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63455+ pax_open_kernel();
63456 sym[i].st_value += secbase;
63457+ pax_close_kernel();
63458 break;
63459 }
63460 }
63461
63462+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63463+ if (is_fs_load && !register_filesystem_found) {
63464+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63465+ ret = -EPERM;
63466+ }
63467+#endif
63468+
63469 return ret;
63470 }
63471
63472@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63473 || s->sh_entsize != ~0UL
63474 || strstarts(secstrings + s->sh_name, ".init"))
63475 continue;
63476- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63477+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63478+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63479+ else
63480+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63481 DEBUGP("\t%s\n", secstrings + s->sh_name);
63482 }
63483- if (m == 0)
63484- mod->core_text_size = mod->core_size;
63485 }
63486
63487 DEBUGP("Init section allocation order:\n");
63488@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63489 || s->sh_entsize != ~0UL
63490 || !strstarts(secstrings + s->sh_name, ".init"))
63491 continue;
63492- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63493- | INIT_OFFSET_MASK);
63494+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63495+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63496+ else
63497+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63498+ s->sh_entsize |= INIT_OFFSET_MASK;
63499 DEBUGP("\t%s\n", secstrings + s->sh_name);
63500 }
63501- if (m == 0)
63502- mod->init_text_size = mod->init_size;
63503 }
63504 }
63505
63506@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63507
63508 /* As per nm */
63509 static char elf_type(const Elf_Sym *sym,
63510- Elf_Shdr *sechdrs,
63511- const char *secstrings,
63512- struct module *mod)
63513+ const Elf_Shdr *sechdrs,
63514+ const char *secstrings)
63515 {
63516 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63517 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63518@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63519
63520 /* Put symbol section at end of init part of module. */
63521 symsect->sh_flags |= SHF_ALLOC;
63522- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63523+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63524 symindex) | INIT_OFFSET_MASK;
63525 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63526
63527@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63528 }
63529
63530 /* Append room for core symbols at end of core part. */
63531- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63532- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63533+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63534+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63535
63536 /* Put string table section at end of init part of module. */
63537 strsect->sh_flags |= SHF_ALLOC;
63538- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63539+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63540 strindex) | INIT_OFFSET_MASK;
63541 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63542
63543 /* Append room for core symbols' strings at end of core part. */
63544- *pstroffs = mod->core_size;
63545+ *pstroffs = mod->core_size_rx;
63546 __set_bit(0, strmap);
63547- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63548+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63549
63550 return symoffs;
63551 }
63552@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63553 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63554 mod->strtab = (void *)sechdrs[strindex].sh_addr;
63555
63556+ pax_open_kernel();
63557+
63558 /* Set types up while we still have access to sections. */
63559 for (i = 0; i < mod->num_symtab; i++)
63560 mod->symtab[i].st_info
63561- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
63562+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
63563
63564- mod->core_symtab = dst = mod->module_core + symoffs;
63565+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
63566 src = mod->symtab;
63567 *dst = *src;
63568 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
63569@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
63570 }
63571 mod->core_num_syms = ndst;
63572
63573- mod->core_strtab = s = mod->module_core + stroffs;
63574+ mod->core_strtab = s = mod->module_core_rx + stroffs;
63575 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
63576 if (test_bit(i, strmap))
63577 *++s = mod->strtab[i];
63578+
63579+ pax_close_kernel();
63580 }
63581 #else
63582 static inline unsigned long layout_symtab(struct module *mod,
63583@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
63584 #endif
63585 }
63586
63587-static void *module_alloc_update_bounds(unsigned long size)
63588+static void *module_alloc_update_bounds_rw(unsigned long size)
63589 {
63590 void *ret = module_alloc(size);
63591
63592 if (ret) {
63593 /* Update module bounds. */
63594- if ((unsigned long)ret < module_addr_min)
63595- module_addr_min = (unsigned long)ret;
63596- if ((unsigned long)ret + size > module_addr_max)
63597- module_addr_max = (unsigned long)ret + size;
63598+ if ((unsigned long)ret < module_addr_min_rw)
63599+ module_addr_min_rw = (unsigned long)ret;
63600+ if ((unsigned long)ret + size > module_addr_max_rw)
63601+ module_addr_max_rw = (unsigned long)ret + size;
63602+ }
63603+ return ret;
63604+}
63605+
63606+static void *module_alloc_update_bounds_rx(unsigned long size)
63607+{
63608+ void *ret = module_alloc_exec(size);
63609+
63610+ if (ret) {
63611+ /* Update module bounds. */
63612+ if ((unsigned long)ret < module_addr_min_rx)
63613+ module_addr_min_rx = (unsigned long)ret;
63614+ if ((unsigned long)ret + size > module_addr_max_rx)
63615+ module_addr_max_rx = (unsigned long)ret + size;
63616 }
63617 return ret;
63618 }
63619@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
63620 unsigned int i;
63621
63622 /* only scan the sections containing data */
63623- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
63624- (unsigned long)mod->module_core,
63625+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
63626+ (unsigned long)mod->module_core_rw,
63627 sizeof(struct module), GFP_KERNEL);
63628
63629 for (i = 1; i < hdr->e_shnum; i++) {
63630@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
63631 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
63632 continue;
63633
63634- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
63635- (unsigned long)mod->module_core,
63636+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
63637+ (unsigned long)mod->module_core_rw,
63638 sechdrs[i].sh_size, GFP_KERNEL);
63639 }
63640 }
63641@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
63642 secstrings, &stroffs, strmap);
63643
63644 /* Do the allocs. */
63645- ptr = module_alloc_update_bounds(mod->core_size);
63646+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
63647 /*
63648 * The pointer to this block is stored in the module structure
63649 * which is inside the block. Just mark it as not being a
63650@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
63651 err = -ENOMEM;
63652 goto free_percpu;
63653 }
63654- memset(ptr, 0, mod->core_size);
63655- mod->module_core = ptr;
63656+ memset(ptr, 0, mod->core_size_rw);
63657+ mod->module_core_rw = ptr;
63658
63659- ptr = module_alloc_update_bounds(mod->init_size);
63660+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
63661 /*
63662 * The pointer to this block is stored in the module structure
63663 * which is inside the block. This block doesn't need to be
63664 * scanned as it contains data and code that will be freed
63665 * after the module is initialized.
63666 */
63667- kmemleak_ignore(ptr);
63668- if (!ptr && mod->init_size) {
63669+ kmemleak_not_leak(ptr);
63670+ if (!ptr && mod->init_size_rw) {
63671+ err = -ENOMEM;
63672+ goto free_core_rw;
63673+ }
63674+ memset(ptr, 0, mod->init_size_rw);
63675+ mod->module_init_rw = ptr;
63676+
63677+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
63678+ kmemleak_not_leak(ptr);
63679+ if (!ptr) {
63680 err = -ENOMEM;
63681- goto free_core;
63682+ goto free_init_rw;
63683 }
63684- memset(ptr, 0, mod->init_size);
63685- mod->module_init = ptr;
63686+
63687+ pax_open_kernel();
63688+ memset(ptr, 0, mod->core_size_rx);
63689+ pax_close_kernel();
63690+ mod->module_core_rx = ptr;
63691+
63692+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
63693+ kmemleak_not_leak(ptr);
63694+ if (!ptr && mod->init_size_rx) {
63695+ err = -ENOMEM;
63696+ goto free_core_rx;
63697+ }
63698+
63699+ pax_open_kernel();
63700+ memset(ptr, 0, mod->init_size_rx);
63701+ pax_close_kernel();
63702+ mod->module_init_rx = ptr;
63703
63704 /* Transfer each section which specifies SHF_ALLOC */
63705 DEBUGP("final section addresses:\n");
63706@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
63707 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
63708 continue;
63709
63710- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
63711- dest = mod->module_init
63712- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63713- else
63714- dest = mod->module_core + sechdrs[i].sh_entsize;
63715+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
63716+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63717+ dest = mod->module_init_rw
63718+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63719+ else
63720+ dest = mod->module_init_rx
63721+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
63722+ } else {
63723+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
63724+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
63725+ else
63726+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
63727+ }
63728+
63729+ if (sechdrs[i].sh_type != SHT_NOBITS) {
63730
63731- if (sechdrs[i].sh_type != SHT_NOBITS)
63732- memcpy(dest, (void *)sechdrs[i].sh_addr,
63733- sechdrs[i].sh_size);
63734+#ifdef CONFIG_PAX_KERNEXEC
63735+#ifdef CONFIG_X86_64
63736+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
63737+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
63738+#endif
63739+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
63740+ pax_open_kernel();
63741+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63742+ pax_close_kernel();
63743+ } else
63744+#endif
63745+
63746+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
63747+ }
63748 /* Update sh_addr to point to copy in image. */
63749- sechdrs[i].sh_addr = (unsigned long)dest;
63750+
63751+#ifdef CONFIG_PAX_KERNEXEC
63752+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
63753+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
63754+ else
63755+#endif
63756+
63757+ sechdrs[i].sh_addr = (unsigned long)dest;
63758 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
63759 }
63760 /* Module has been moved. */
63761@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
63762 mod->name);
63763 if (!mod->refptr) {
63764 err = -ENOMEM;
63765- goto free_init;
63766+ goto free_init_rx;
63767 }
63768 #endif
63769 /* Now we've moved module, initialize linked lists, etc. */
63770@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
63771 /* Set up MODINFO_ATTR fields */
63772 setup_modinfo(mod, sechdrs, infoindex);
63773
63774+ mod->args = args;
63775+
63776+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63777+ {
63778+ char *p, *p2;
63779+
63780+ if (strstr(mod->args, "grsec_modharden_netdev")) {
63781+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
63782+ err = -EPERM;
63783+ goto cleanup;
63784+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
63785+ p += strlen("grsec_modharden_normal");
63786+ p2 = strstr(p, "_");
63787+ if (p2) {
63788+ *p2 = '\0';
63789+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
63790+ *p2 = '_';
63791+ }
63792+ err = -EPERM;
63793+ goto cleanup;
63794+ }
63795+ }
63796+#endif
63797+
63798+
63799 /* Fix up syms, so that st_value is a pointer to location. */
63800 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
63801 mod);
63802@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
63803
63804 /* Now do relocations. */
63805 for (i = 1; i < hdr->e_shnum; i++) {
63806- const char *strtab = (char *)sechdrs[strindex].sh_addr;
63807 unsigned int info = sechdrs[i].sh_info;
63808+ strtab = (char *)sechdrs[strindex].sh_addr;
63809
63810 /* Not a valid relocation section? */
63811 if (info >= hdr->e_shnum)
63812@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
63813 * Do it before processing of module parameters, so the module
63814 * can provide parameter accessor functions of its own.
63815 */
63816- if (mod->module_init)
63817- flush_icache_range((unsigned long)mod->module_init,
63818- (unsigned long)mod->module_init
63819- + mod->init_size);
63820- flush_icache_range((unsigned long)mod->module_core,
63821- (unsigned long)mod->module_core + mod->core_size);
63822+ if (mod->module_init_rx)
63823+ flush_icache_range((unsigned long)mod->module_init_rx,
63824+ (unsigned long)mod->module_init_rx
63825+ + mod->init_size_rx);
63826+ flush_icache_range((unsigned long)mod->module_core_rx,
63827+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
63828
63829 set_fs(old_fs);
63830
63831- mod->args = args;
63832 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
63833 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
63834 mod->name);
63835@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
63836 free_unload:
63837 module_unload_free(mod);
63838 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
63839+ free_init_rx:
63840 percpu_modfree(mod->refptr);
63841- free_init:
63842 #endif
63843- module_free(mod, mod->module_init);
63844- free_core:
63845- module_free(mod, mod->module_core);
63846+ module_free_exec(mod, mod->module_init_rx);
63847+ free_core_rx:
63848+ module_free_exec(mod, mod->module_core_rx);
63849+ free_init_rw:
63850+ module_free(mod, mod->module_init_rw);
63851+ free_core_rw:
63852+ module_free(mod, mod->module_core_rw);
63853 /* mod will be freed with core. Don't access it beyond this line! */
63854 free_percpu:
63855 if (percpu)
63856@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
63857 mod->symtab = mod->core_symtab;
63858 mod->strtab = mod->core_strtab;
63859 #endif
63860- module_free(mod, mod->module_init);
63861- mod->module_init = NULL;
63862- mod->init_size = 0;
63863- mod->init_text_size = 0;
63864+ module_free(mod, mod->module_init_rw);
63865+ module_free_exec(mod, mod->module_init_rx);
63866+ mod->module_init_rw = NULL;
63867+ mod->module_init_rx = NULL;
63868+ mod->init_size_rw = 0;
63869+ mod->init_size_rx = 0;
63870 mutex_unlock(&module_mutex);
63871
63872 return 0;
63873@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
63874 unsigned long nextval;
63875
63876 /* At worse, next value is at end of module */
63877- if (within_module_init(addr, mod))
63878- nextval = (unsigned long)mod->module_init+mod->init_text_size;
63879+ if (within_module_init_rx(addr, mod))
63880+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
63881+ else if (within_module_init_rw(addr, mod))
63882+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
63883+ else if (within_module_core_rx(addr, mod))
63884+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
63885+ else if (within_module_core_rw(addr, mod))
63886+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
63887 else
63888- nextval = (unsigned long)mod->module_core+mod->core_text_size;
63889+ return NULL;
63890
63891 /* Scan for closest preceeding symbol, and next symbol. (ELF
63892 starts real symbols at 1). */
63893@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
63894 char buf[8];
63895
63896 seq_printf(m, "%s %u",
63897- mod->name, mod->init_size + mod->core_size);
63898+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
63899 print_unload_info(m, mod);
63900
63901 /* Informative for users. */
63902@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
63903 mod->state == MODULE_STATE_COMING ? "Loading":
63904 "Live");
63905 /* Used by oprofile and other similar tools. */
63906- seq_printf(m, " 0x%p", mod->module_core);
63907+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63908
63909 /* Taints info */
63910 if (mod->taints)
63911@@ -2981,7 +3128,17 @@ static const struct file_operations proc
63912
63913 static int __init proc_modules_init(void)
63914 {
63915+#ifndef CONFIG_GRKERNSEC_HIDESYM
63916+#ifdef CONFIG_GRKERNSEC_PROC_USER
63917+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63918+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63919+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
63920+#else
63921 proc_create("modules", 0, NULL, &proc_modules_operations);
63922+#endif
63923+#else
63924+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
63925+#endif
63926 return 0;
63927 }
63928 module_init(proc_modules_init);
63929@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
63930 {
63931 struct module *mod;
63932
63933- if (addr < module_addr_min || addr > module_addr_max)
63934+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
63935+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
63936 return NULL;
63937
63938 list_for_each_entry_rcu(mod, &modules, list)
63939- if (within_module_core(addr, mod)
63940- || within_module_init(addr, mod))
63941+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
63942 return mod;
63943 return NULL;
63944 }
63945@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
63946 */
63947 struct module *__module_text_address(unsigned long addr)
63948 {
63949- struct module *mod = __module_address(addr);
63950+ struct module *mod;
63951+
63952+#ifdef CONFIG_X86_32
63953+ addr = ktla_ktva(addr);
63954+#endif
63955+
63956+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
63957+ return NULL;
63958+
63959+ mod = __module_address(addr);
63960+
63961 if (mod) {
63962 /* Make sure it's within the text section. */
63963- if (!within(addr, mod->module_init, mod->init_text_size)
63964- && !within(addr, mod->module_core, mod->core_text_size))
63965+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
63966 mod = NULL;
63967 }
63968 return mod;
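
The kernel/module.c hunks above split each module into separate read-write and read-execute allocations (module_core_rw/module_core_rx and the matching init pairs), route every section into one of the two regions based on its SHF_WRITE/SHF_ALLOC flags, and bracket writes into the RX region with pax_open_kernel()/pax_close_kernel(). The routing decision itself is small; the stand-alone user-space sketch below models only that decision. The names region_for_section and struct layout are invented for the sketch and do not appear in the patch.

#include <stdio.h>

#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

struct layout {
    char *core_rw;   /* data, bss: writable, never executable         */
    char *core_rx;   /* text, rodata: executable/read-only at runtime */
};

/* Pick the destination the way the patched loader does: writable (or
 * non-allocated) sections land in the RW block, everything else in RX. */
static char *region_for_section(const struct layout *l, unsigned long sh_flags)
{
    if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
        return l->core_rw;
    return l->core_rx;
}

int main(void)
{
    struct layout l = { .core_rw = (char *)0x1000, .core_rx = (char *)0x2000 };

    printf(".text   -> %p\n", (void *)region_for_section(&l, SHF_ALLOC | SHF_EXECINSTR));
    printf(".rodata -> %p\n", (void *)region_for_section(&l, SHF_ALLOC));
    printf(".data   -> %p\n", (void *)region_for_section(&l, SHF_ALLOC | SHF_WRITE));
    return 0;
}
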
63969diff -urNp linux-2.6.32.46/kernel/mutex.c linux-2.6.32.46/kernel/mutex.c
63970--- linux-2.6.32.46/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
63971+++ linux-2.6.32.46/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
63972@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
63973 */
63974
63975 for (;;) {
63976- struct thread_info *owner;
63977+ struct task_struct *owner;
63978
63979 /*
63980 * If we own the BKL, then don't spin. The owner of
63981@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
63982 spin_lock_mutex(&lock->wait_lock, flags);
63983
63984 debug_mutex_lock_common(lock, &waiter);
63985- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
63986+ debug_mutex_add_waiter(lock, &waiter, task);
63987
63988 /* add waiting tasks to the end of the waitqueue (FIFO): */
63989 list_add_tail(&waiter.list, &lock->wait_list);
63990@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
63991 * TASK_UNINTERRUPTIBLE case.)
63992 */
63993 if (unlikely(signal_pending_state(state, task))) {
63994- mutex_remove_waiter(lock, &waiter,
63995- task_thread_info(task));
63996+ mutex_remove_waiter(lock, &waiter, task);
63997 mutex_release(&lock->dep_map, 1, ip);
63998 spin_unlock_mutex(&lock->wait_lock, flags);
63999
64000@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64001 done:
64002 lock_acquired(&lock->dep_map, ip);
64003 /* got the lock - rejoice! */
64004- mutex_remove_waiter(lock, &waiter, current_thread_info());
64005+ mutex_remove_waiter(lock, &waiter, task);
64006 mutex_set_owner(lock);
64007
64008 /* set it to 0 if there are no waiters left: */
64009diff -urNp linux-2.6.32.46/kernel/mutex-debug.c linux-2.6.32.46/kernel/mutex-debug.c
64010--- linux-2.6.32.46/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64011+++ linux-2.6.32.46/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64012@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64013 }
64014
64015 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64016- struct thread_info *ti)
64017+ struct task_struct *task)
64018 {
64019 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64020
64021 /* Mark the current thread as blocked on the lock: */
64022- ti->task->blocked_on = waiter;
64023+ task->blocked_on = waiter;
64024 }
64025
64026 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64027- struct thread_info *ti)
64028+ struct task_struct *task)
64029 {
64030 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64031- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64032- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64033- ti->task->blocked_on = NULL;
64034+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64035+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64036+ task->blocked_on = NULL;
64037
64038 list_del_init(&waiter->list);
64039 waiter->task = NULL;
64040@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64041 return;
64042
64043 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64044- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64045+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
64046 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64047 mutex_clear_owner(lock);
64048 }
64049diff -urNp linux-2.6.32.46/kernel/mutex-debug.h linux-2.6.32.46/kernel/mutex-debug.h
64050--- linux-2.6.32.46/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64051+++ linux-2.6.32.46/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64052@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64053 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64054 extern void debug_mutex_add_waiter(struct mutex *lock,
64055 struct mutex_waiter *waiter,
64056- struct thread_info *ti);
64057+ struct task_struct *task);
64058 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64059- struct thread_info *ti);
64060+ struct task_struct *task);
64061 extern void debug_mutex_unlock(struct mutex *lock);
64062 extern void debug_mutex_init(struct mutex *lock, const char *name,
64063 struct lock_class_key *key);
64064
64065 static inline void mutex_set_owner(struct mutex *lock)
64066 {
64067- lock->owner = current_thread_info();
64068+ lock->owner = current;
64069 }
64070
64071 static inline void mutex_clear_owner(struct mutex *lock)
64072diff -urNp linux-2.6.32.46/kernel/mutex.h linux-2.6.32.46/kernel/mutex.h
64073--- linux-2.6.32.46/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64074+++ linux-2.6.32.46/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64075@@ -19,7 +19,7 @@
64076 #ifdef CONFIG_SMP
64077 static inline void mutex_set_owner(struct mutex *lock)
64078 {
64079- lock->owner = current_thread_info();
64080+ lock->owner = current;
64081 }
64082
64083 static inline void mutex_clear_owner(struct mutex *lock)
64084diff -urNp linux-2.6.32.46/kernel/panic.c linux-2.6.32.46/kernel/panic.c
64085--- linux-2.6.32.46/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64086+++ linux-2.6.32.46/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64087@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64088 const char *board;
64089
64090 printk(KERN_WARNING "------------[ cut here ]------------\n");
64091- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64092+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64093 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64094 if (board)
64095 printk(KERN_WARNING "Hardware name: %s\n", board);
64096@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64097 */
64098 void __stack_chk_fail(void)
64099 {
64100- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64101+ dump_stack();
64102+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64103 __builtin_return_address(0));
64104 }
64105 EXPORT_SYMBOL(__stack_chk_fail);
64106diff -urNp linux-2.6.32.46/kernel/params.c linux-2.6.32.46/kernel/params.c
64107--- linux-2.6.32.46/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64108+++ linux-2.6.32.46/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64109@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64110 return ret;
64111 }
64112
64113-static struct sysfs_ops module_sysfs_ops = {
64114+static const struct sysfs_ops module_sysfs_ops = {
64115 .show = module_attr_show,
64116 .store = module_attr_store,
64117 };
64118@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64119 return 0;
64120 }
64121
64122-static struct kset_uevent_ops module_uevent_ops = {
64123+static const struct kset_uevent_ops module_uevent_ops = {
64124 .filter = uevent_filter,
64125 };
64126
64127diff -urNp linux-2.6.32.46/kernel/perf_event.c linux-2.6.32.46/kernel/perf_event.c
64128--- linux-2.6.32.46/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64129+++ linux-2.6.32.46/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64130@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64131 */
64132 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64133
64134-static atomic64_t perf_event_id;
64135+static atomic64_unchecked_t perf_event_id;
64136
64137 /*
64138 * Lock for (sysadmin-configurable) event reservations:
64139@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64140 * In order to keep per-task stats reliable we need to flip the event
64141 * values when we flip the contexts.
64142 */
64143- value = atomic64_read(&next_event->count);
64144- value = atomic64_xchg(&event->count, value);
64145- atomic64_set(&next_event->count, value);
64146+ value = atomic64_read_unchecked(&next_event->count);
64147+ value = atomic64_xchg_unchecked(&event->count, value);
64148+ atomic64_set_unchecked(&next_event->count, value);
64149
64150 swap(event->total_time_enabled, next_event->total_time_enabled);
64151 swap(event->total_time_running, next_event->total_time_running);
64152@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64153 update_event_times(event);
64154 }
64155
64156- return atomic64_read(&event->count);
64157+ return atomic64_read_unchecked(&event->count);
64158 }
64159
64160 /*
64161@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64162 values[n++] = 1 + leader->nr_siblings;
64163 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64164 values[n++] = leader->total_time_enabled +
64165- atomic64_read(&leader->child_total_time_enabled);
64166+ atomic64_read_unchecked(&leader->child_total_time_enabled);
64167 }
64168 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64169 values[n++] = leader->total_time_running +
64170- atomic64_read(&leader->child_total_time_running);
64171+ atomic64_read_unchecked(&leader->child_total_time_running);
64172 }
64173
64174 size = n * sizeof(u64);
64175@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64176 values[n++] = perf_event_read_value(event);
64177 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64178 values[n++] = event->total_time_enabled +
64179- atomic64_read(&event->child_total_time_enabled);
64180+ atomic64_read_unchecked(&event->child_total_time_enabled);
64181 }
64182 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64183 values[n++] = event->total_time_running +
64184- atomic64_read(&event->child_total_time_running);
64185+ atomic64_read_unchecked(&event->child_total_time_running);
64186 }
64187 if (read_format & PERF_FORMAT_ID)
64188 values[n++] = primary_event_id(event);
64189@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64190 static void perf_event_reset(struct perf_event *event)
64191 {
64192 (void)perf_event_read(event);
64193- atomic64_set(&event->count, 0);
64194+ atomic64_set_unchecked(&event->count, 0);
64195 perf_event_update_userpage(event);
64196 }
64197
64198@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64199 ++userpg->lock;
64200 barrier();
64201 userpg->index = perf_event_index(event);
64202- userpg->offset = atomic64_read(&event->count);
64203+ userpg->offset = atomic64_read_unchecked(&event->count);
64204 if (event->state == PERF_EVENT_STATE_ACTIVE)
64205- userpg->offset -= atomic64_read(&event->hw.prev_count);
64206+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64207
64208 userpg->time_enabled = event->total_time_enabled +
64209- atomic64_read(&event->child_total_time_enabled);
64210+ atomic64_read_unchecked(&event->child_total_time_enabled);
64211
64212 userpg->time_running = event->total_time_running +
64213- atomic64_read(&event->child_total_time_running);
64214+ atomic64_read_unchecked(&event->child_total_time_running);
64215
64216 barrier();
64217 ++userpg->lock;
64218@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64219 u64 values[4];
64220 int n = 0;
64221
64222- values[n++] = atomic64_read(&event->count);
64223+ values[n++] = atomic64_read_unchecked(&event->count);
64224 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64225 values[n++] = event->total_time_enabled +
64226- atomic64_read(&event->child_total_time_enabled);
64227+ atomic64_read_unchecked(&event->child_total_time_enabled);
64228 }
64229 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64230 values[n++] = event->total_time_running +
64231- atomic64_read(&event->child_total_time_running);
64232+ atomic64_read_unchecked(&event->child_total_time_running);
64233 }
64234 if (read_format & PERF_FORMAT_ID)
64235 values[n++] = primary_event_id(event);
64236@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64237 if (leader != event)
64238 leader->pmu->read(leader);
64239
64240- values[n++] = atomic64_read(&leader->count);
64241+ values[n++] = atomic64_read_unchecked(&leader->count);
64242 if (read_format & PERF_FORMAT_ID)
64243 values[n++] = primary_event_id(leader);
64244
64245@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64246 if (sub != event)
64247 sub->pmu->read(sub);
64248
64249- values[n++] = atomic64_read(&sub->count);
64250+ values[n++] = atomic64_read_unchecked(&sub->count);
64251 if (read_format & PERF_FORMAT_ID)
64252 values[n++] = primary_event_id(sub);
64253
64254@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64255 {
64256 struct hw_perf_event *hwc = &event->hw;
64257
64258- atomic64_add(nr, &event->count);
64259+ atomic64_add_unchecked(nr, &event->count);
64260
64261 if (!hwc->sample_period)
64262 return;
64263@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64264 u64 now;
64265
64266 now = cpu_clock(cpu);
64267- prev = atomic64_read(&event->hw.prev_count);
64268- atomic64_set(&event->hw.prev_count, now);
64269- atomic64_add(now - prev, &event->count);
64270+ prev = atomic64_read_unchecked(&event->hw.prev_count);
64271+ atomic64_set_unchecked(&event->hw.prev_count, now);
64272+ atomic64_add_unchecked(now - prev, &event->count);
64273 }
64274
64275 static int cpu_clock_perf_event_enable(struct perf_event *event)
64276@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64277 struct hw_perf_event *hwc = &event->hw;
64278 int cpu = raw_smp_processor_id();
64279
64280- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64281+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64282 perf_swevent_start_hrtimer(event);
64283
64284 return 0;
64285@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64286 u64 prev;
64287 s64 delta;
64288
64289- prev = atomic64_xchg(&event->hw.prev_count, now);
64290+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64291 delta = now - prev;
64292- atomic64_add(delta, &event->count);
64293+ atomic64_add_unchecked(delta, &event->count);
64294 }
64295
64296 static int task_clock_perf_event_enable(struct perf_event *event)
64297@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64298
64299 now = event->ctx->time;
64300
64301- atomic64_set(&hwc->prev_count, now);
64302+ atomic64_set_unchecked(&hwc->prev_count, now);
64303
64304 perf_swevent_start_hrtimer(event);
64305
64306@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64307 event->parent = parent_event;
64308
64309 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64310- event->id = atomic64_inc_return(&perf_event_id);
64311+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
64312
64313 event->state = PERF_EVENT_STATE_INACTIVE;
64314
64315@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64316 if (child_event->attr.inherit_stat)
64317 perf_event_read_event(child_event, child);
64318
64319- child_val = atomic64_read(&child_event->count);
64320+ child_val = atomic64_read_unchecked(&child_event->count);
64321
64322 /*
64323 * Add back the child's count to the parent's count:
64324 */
64325- atomic64_add(child_val, &parent_event->count);
64326- atomic64_add(child_event->total_time_enabled,
64327+ atomic64_add_unchecked(child_val, &parent_event->count);
64328+ atomic64_add_unchecked(child_event->total_time_enabled,
64329 &parent_event->child_total_time_enabled);
64330- atomic64_add(child_event->total_time_running,
64331+ atomic64_add_unchecked(child_event->total_time_running,
64332 &parent_event->child_total_time_running);
64333
64334 /*
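
The kernel/perf_event.c changes above, like the later profile.c and rcutorture.c hunks, convert counters that may legitimately wrap (event counts, timing statistics) from atomic64_t to atomic64_unchecked_t, so that PaX's overflow-trapping atomic operations are reserved for reference-count style users where wrap-around indicates a bug. A minimal user-space illustration of that split follows; it relies on the GCC/Clang __builtin_add_overflow builtin, and the counter_t/counter_unchecked_t names are invented for the sketch, not kernel types.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int64_t v; } counter_t;            /* overflow here is a bug   */
typedef struct { int64_t v; } counter_unchecked_t;  /* wrap-around is tolerated */

static void counter_add(counter_t *c, int64_t n)
{
    int64_t res;

    if (__builtin_add_overflow(c->v, n, &res)) {
        fprintf(stderr, "refcount-style overflow caught\n");
        abort();                        /* models the PaX overflow trap */
    }
    c->v = res;
}

static void counter_add_unchecked(counter_unchecked_t *c, int64_t n)
{
    /* wraps silently, like atomic64_add_unchecked() on the counters above */
    c->v = (int64_t)((uint64_t)c->v + (uint64_t)n);
}

int main(void)
{
    counter_unchecked_t stats = { INT64_MAX };
    counter_t ref = { INT64_MAX };

    counter_add_unchecked(&stats, 1);   /* fine: statistics may wrap */
    printf("stats wrapped to %lld\n", (long long)stats.v);

    counter_add(&ref, 1);               /* aborts: a wrapping refcount is a bug */
    return 0;
}
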
64335diff -urNp linux-2.6.32.46/kernel/pid.c linux-2.6.32.46/kernel/pid.c
64336--- linux-2.6.32.46/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64337+++ linux-2.6.32.46/kernel/pid.c 2011-08-21 19:11:29.000000000 -0400
64338@@ -33,6 +33,7 @@
64339 #include <linux/rculist.h>
64340 #include <linux/bootmem.h>
64341 #include <linux/hash.h>
64342+#include <linux/security.h>
64343 #include <linux/pid_namespace.h>
64344 #include <linux/init_task.h>
64345 #include <linux/syscalls.h>
64346@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64347
64348 int pid_max = PID_MAX_DEFAULT;
64349
64350-#define RESERVED_PIDS 300
64351+#define RESERVED_PIDS 500
64352
64353 int pid_max_min = RESERVED_PIDS + 1;
64354 int pid_max_max = PID_MAX_LIMIT;
64355@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64356 */
64357 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64358 {
64359- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64360+ struct task_struct *task;
64361+
64362+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64363+
64364+ if (gr_pid_is_chrooted(task))
64365+ return NULL;
64366+
64367+ return task;
64368 }
64369
64370 struct task_struct *find_task_by_vpid(pid_t vnr)
64371@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
64372 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64373 }
64374
64375+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64376+{
64377+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64378+}
64379+
64380 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64381 {
64382 struct pid *pid;
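
The kernel/pid.c hunk above raises RESERVED_PIDS from 300 to 500, makes find_task_by_pid_ns() return NULL when gr_pid_is_chrooted() says the target should be invisible to the caller, and adds find_task_by_vpid_unrestricted() for the few callers that must still see every task. Below is a self-contained sketch of that "filtered lookup plus unrestricted variant" pattern; the task table, hidden_from_caller() and both lookup names are stand-ins, not the kernel API.

#include <stddef.h>
#include <stdio.h>

struct task { int pid; int chrooted; };

static struct task tasks[] = { { 1, 0 }, { 42, 1 } };

static struct task *raw_lookup(int pid)
{
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        if (tasks[i].pid == pid)
            return &tasks[i];
    return NULL;
}

/* stand-in for gr_pid_is_chrooted(): hide tasks outside the caller's chroot */
static int hidden_from_caller(const struct task *t)
{
    return t->chrooted;
}

/* filtered lookup, like the patched find_task_by_pid_ns() */
static struct task *lookup(int pid)
{
    struct task *t = raw_lookup(pid);

    return (t && hidden_from_caller(t)) ? NULL : t;
}

/* unfiltered variant, like find_task_by_vpid_unrestricted() */
static struct task *lookup_unrestricted(int pid)
{
    return raw_lookup(pid);
}

int main(void)
{
    printf("lookup(42)              -> %p\n", (void *)lookup(42));
    printf("lookup_unrestricted(42) -> %p\n", (void *)lookup_unrestricted(42));
    return 0;
}
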
64383diff -urNp linux-2.6.32.46/kernel/posix-cpu-timers.c linux-2.6.32.46/kernel/posix-cpu-timers.c
64384--- linux-2.6.32.46/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64385+++ linux-2.6.32.46/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64386@@ -6,6 +6,7 @@
64387 #include <linux/posix-timers.h>
64388 #include <linux/errno.h>
64389 #include <linux/math64.h>
64390+#include <linux/security.h>
64391 #include <asm/uaccess.h>
64392 #include <linux/kernel_stat.h>
64393 #include <trace/events/timer.h>
64394@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64395
64396 static __init int init_posix_cpu_timers(void)
64397 {
64398- struct k_clock process = {
64399+ static struct k_clock process = {
64400 .clock_getres = process_cpu_clock_getres,
64401 .clock_get = process_cpu_clock_get,
64402 .clock_set = do_posix_clock_nosettime,
64403@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64404 .nsleep = process_cpu_nsleep,
64405 .nsleep_restart = process_cpu_nsleep_restart,
64406 };
64407- struct k_clock thread = {
64408+ static struct k_clock thread = {
64409 .clock_getres = thread_cpu_clock_getres,
64410 .clock_get = thread_cpu_clock_get,
64411 .clock_set = do_posix_clock_nosettime,
64412diff -urNp linux-2.6.32.46/kernel/posix-timers.c linux-2.6.32.46/kernel/posix-timers.c
64413--- linux-2.6.32.46/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64414+++ linux-2.6.32.46/kernel/posix-timers.c 2011-08-23 20:22:38.000000000 -0400
64415@@ -42,6 +42,7 @@
64416 #include <linux/compiler.h>
64417 #include <linux/idr.h>
64418 #include <linux/posix-timers.h>
64419+#include <linux/grsecurity.h>
64420 #include <linux/syscalls.h>
64421 #include <linux/wait.h>
64422 #include <linux/workqueue.h>
64423@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64424 * which we beg off on and pass to do_sys_settimeofday().
64425 */
64426
64427-static struct k_clock posix_clocks[MAX_CLOCKS];
64428+static struct k_clock *posix_clocks[MAX_CLOCKS];
64429
64430 /*
64431 * These ones are defined below.
64432@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64433 */
64434 #define CLOCK_DISPATCH(clock, call, arglist) \
64435 ((clock) < 0 ? posix_cpu_##call arglist : \
64436- (posix_clocks[clock].call != NULL \
64437- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64438+ (posix_clocks[clock]->call != NULL \
64439+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64440
64441 /*
64442 * Default clock hook functions when the struct k_clock passed
64443@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64444 struct timespec *tp)
64445 {
64446 tp->tv_sec = 0;
64447- tp->tv_nsec = posix_clocks[which_clock].res;
64448+ tp->tv_nsec = posix_clocks[which_clock]->res;
64449 return 0;
64450 }
64451
64452@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64453 return 0;
64454 if ((unsigned) which_clock >= MAX_CLOCKS)
64455 return 1;
64456- if (posix_clocks[which_clock].clock_getres != NULL)
64457+ if (posix_clocks[which_clock] == NULL)
64458 return 0;
64459- if (posix_clocks[which_clock].res != 0)
64460+ if (posix_clocks[which_clock]->clock_getres != NULL)
64461+ return 0;
64462+ if (posix_clocks[which_clock]->res != 0)
64463 return 0;
64464 return 1;
64465 }
64466@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64467 */
64468 static __init int init_posix_timers(void)
64469 {
64470- struct k_clock clock_realtime = {
64471+ static struct k_clock clock_realtime = {
64472 .clock_getres = hrtimer_get_res,
64473 };
64474- struct k_clock clock_monotonic = {
64475+ static struct k_clock clock_monotonic = {
64476 .clock_getres = hrtimer_get_res,
64477 .clock_get = posix_ktime_get_ts,
64478 .clock_set = do_posix_clock_nosettime,
64479 };
64480- struct k_clock clock_monotonic_raw = {
64481+ static struct k_clock clock_monotonic_raw = {
64482 .clock_getres = hrtimer_get_res,
64483 .clock_get = posix_get_monotonic_raw,
64484 .clock_set = do_posix_clock_nosettime,
64485 .timer_create = no_timer_create,
64486 .nsleep = no_nsleep,
64487 };
64488- struct k_clock clock_realtime_coarse = {
64489+ static struct k_clock clock_realtime_coarse = {
64490 .clock_getres = posix_get_coarse_res,
64491 .clock_get = posix_get_realtime_coarse,
64492 .clock_set = do_posix_clock_nosettime,
64493 .timer_create = no_timer_create,
64494 .nsleep = no_nsleep,
64495 };
64496- struct k_clock clock_monotonic_coarse = {
64497+ static struct k_clock clock_monotonic_coarse = {
64498 .clock_getres = posix_get_coarse_res,
64499 .clock_get = posix_get_monotonic_coarse,
64500 .clock_set = do_posix_clock_nosettime,
64501@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64502 .nsleep = no_nsleep,
64503 };
64504
64505+ pax_track_stack();
64506+
64507 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64508 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64509 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64510@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64511 return;
64512 }
64513
64514- posix_clocks[clock_id] = *new_clock;
64515+ posix_clocks[clock_id] = new_clock;
64516 }
64517 EXPORT_SYMBOL_GPL(register_posix_clock);
64518
64519@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64520 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64521 return -EFAULT;
64522
64523+ /* only the CLOCK_REALTIME clock can be set, all other clocks
64524+ have their clock_set fptr set to a nosettime dummy function
64525+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
64526+ call common_clock_set, which calls do_sys_settimeofday, which
64527+ we hook
64528+ */
64529+
64530 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64531 }
64532
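
The kernel/posix-timers.c hunks above turn posix_clocks[] from an array of struct k_clock copies into an array of pointers, so register_posix_clock() stores a pointer to a k_clock with static storage duration and CLOCK_DISPATCH() indirects through '->'. The sketch below models that registration and dispatch pattern in user space; clock_ops, clock_table, register_clock and dispatch_getres are illustrative names only.

#include <stddef.h>
#include <stdio.h>

#define MAX_SLOTS 4

struct clock_ops {
    long (*getres)(void);
};

static struct clock_ops *clock_table[MAX_SLOTS];   /* was: an array of struct copies */

static void register_clock(int id, struct clock_ops *ops)
{
    if (id < 0 || id >= MAX_SLOTS)
        return;
    clock_table[id] = ops;              /* store the pointer, no struct copy */
}

static long dispatch_getres(int id)
{
    /* mirrors CLOCK_DISPATCH: use the registered hook when present */
    if (clock_table[id] && clock_table[id]->getres)
        return clock_table[id]->getres();
    return -1;
}

static long monotonic_getres(void) { return 1; }

/* must outlive registration now that only the pointer is kept; this is why
 * the patch adds 'static' to the k_clock initializers in init_posix_timers() */
static struct clock_ops monotonic_ops = { .getres = monotonic_getres };

int main(void)
{
    register_clock(1, &monotonic_ops);
    printf("res(0) = %ld\n", dispatch_getres(0));
    printf("res(1) = %ld\n", dispatch_getres(1));
    return 0;
}
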
64533diff -urNp linux-2.6.32.46/kernel/power/hibernate.c linux-2.6.32.46/kernel/power/hibernate.c
64534--- linux-2.6.32.46/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64535+++ linux-2.6.32.46/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64536@@ -48,14 +48,14 @@ enum {
64537
64538 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64539
64540-static struct platform_hibernation_ops *hibernation_ops;
64541+static const struct platform_hibernation_ops *hibernation_ops;
64542
64543 /**
64544 * hibernation_set_ops - set the global hibernate operations
64545 * @ops: the hibernation operations to use in subsequent hibernation transitions
64546 */
64547
64548-void hibernation_set_ops(struct platform_hibernation_ops *ops)
64549+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64550 {
64551 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64552 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64553diff -urNp linux-2.6.32.46/kernel/power/poweroff.c linux-2.6.32.46/kernel/power/poweroff.c
64554--- linux-2.6.32.46/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
64555+++ linux-2.6.32.46/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
64556@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
64557 .enable_mask = SYSRQ_ENABLE_BOOT,
64558 };
64559
64560-static int pm_sysrq_init(void)
64561+static int __init pm_sysrq_init(void)
64562 {
64563 register_sysrq_key('o', &sysrq_poweroff_op);
64564 return 0;
64565diff -urNp linux-2.6.32.46/kernel/power/process.c linux-2.6.32.46/kernel/power/process.c
64566--- linux-2.6.32.46/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
64567+++ linux-2.6.32.46/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
64568@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
64569 struct timeval start, end;
64570 u64 elapsed_csecs64;
64571 unsigned int elapsed_csecs;
64572+ bool timedout = false;
64573
64574 do_gettimeofday(&start);
64575
64576 end_time = jiffies + TIMEOUT;
64577 do {
64578 todo = 0;
64579+ if (time_after(jiffies, end_time))
64580+ timedout = true;
64581 read_lock(&tasklist_lock);
64582 do_each_thread(g, p) {
64583 if (frozen(p) || !freezeable(p))
64584@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
64585 * It is "frozen enough". If the task does wake
64586 * up, it will immediately call try_to_freeze.
64587 */
64588- if (!task_is_stopped_or_traced(p) &&
64589- !freezer_should_skip(p))
64590+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
64591 todo++;
64592+ if (timedout) {
64593+ printk(KERN_ERR "Task refusing to freeze:\n");
64594+ sched_show_task(p);
64595+ }
64596+ }
64597 } while_each_thread(g, p);
64598 read_unlock(&tasklist_lock);
64599 yield(); /* Yield is okay here */
64600- if (time_after(jiffies, end_time))
64601- break;
64602- } while (todo);
64603+ } while (todo && !timedout);
64604
64605 do_gettimeofday(&end);
64606 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
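
The kernel/power/process.c change above replaces the silent "break on timeout" in try_to_freeze_tasks() with a timedout flag: the loop does one final pass after the deadline and prints each task that is still refusing to freeze via sched_show_task(). A small user-space model of that reporting loop follows (it busy-waits for about a second to keep the example short); the worker structure and the report() helper are invented for the sketch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

struct worker { const char *name; bool frozen; };

/* stand-in for sched_show_task(): name the straggler */
static void report(const struct worker *w)
{
    fprintf(stderr, "worker refusing to freeze: %s\n", w->name);
}

int main(void)
{
    struct worker workers[] = { { "ok", true }, { "stuck", false } };
    time_t deadline = time(NULL) + 1;   /* short deadline, busy-waits briefly */
    bool timedout = false;
    int todo;

    do {
        todo = 0;
        if (time(NULL) > deadline)
            timedout = true;
        for (size_t i = 0; i < sizeof(workers) / sizeof(workers[0]); i++) {
            if (workers[i].frozen)
                continue;
            todo++;
            if (timedout)
                report(&workers[i]);    /* only on the final, post-deadline pass */
        }
    } while (todo && !timedout);        /* was: break on timeout, with no report */

    return todo ? 1 : 0;
}
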
64607diff -urNp linux-2.6.32.46/kernel/power/suspend.c linux-2.6.32.46/kernel/power/suspend.c
64608--- linux-2.6.32.46/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
64609+++ linux-2.6.32.46/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
64610@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
64611 [PM_SUSPEND_MEM] = "mem",
64612 };
64613
64614-static struct platform_suspend_ops *suspend_ops;
64615+static const struct platform_suspend_ops *suspend_ops;
64616
64617 /**
64618 * suspend_set_ops - Set the global suspend method table.
64619 * @ops: Pointer to ops structure.
64620 */
64621-void suspend_set_ops(struct platform_suspend_ops *ops)
64622+void suspend_set_ops(const struct platform_suspend_ops *ops)
64623 {
64624 mutex_lock(&pm_mutex);
64625 suspend_ops = ops;
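
A pattern that recurs above in kernel/params.c, kernel/power/hibernate.c and kernel/power/suspend.c is const-qualifying tables of function pointers (sysfs_ops, kset_uevent_ops, platform_hibernation_ops, platform_suspend_ops) and the pointers that hold them, so the tables can sit in read-only data and cannot be retargeted after registration. A compilable sketch of the idea, with invented names (dev_ops, set_ops), is:

#include <stdio.h>

struct dev_ops {
    int (*show)(void);
};

static const struct dev_ops *registered_ops;     /* was: struct dev_ops * */

static void set_ops(const struct dev_ops *ops)   /* was: struct dev_ops *ops */
{
    registered_ops = ops;
}

static int my_show(void) { return 42; }

/* const-qualified, so the table may be placed in a read-only section */
static const struct dev_ops my_ops = { .show = my_show };

int main(void)
{
    set_ops(&my_ops);
    printf("%d\n", registered_ops->show());
    /* registered_ops->show = NULL; would no longer compile: pointer to const */
    return 0;
}
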
64626diff -urNp linux-2.6.32.46/kernel/printk.c linux-2.6.32.46/kernel/printk.c
64627--- linux-2.6.32.46/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
64628+++ linux-2.6.32.46/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
64629@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
64630 char c;
64631 int error = 0;
64632
64633+#ifdef CONFIG_GRKERNSEC_DMESG
64634+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
64635+ return -EPERM;
64636+#endif
64637+
64638 error = security_syslog(type);
64639 if (error)
64640 return error;
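
The kernel/printk.c hunk above adds an early CONFIG_GRKERNSEC_DMESG gate to do_syslog(): when grsec_enable_dmesg is set, callers without CAP_SYS_ADMIN get -EPERM before security_syslog() is even consulted. A trivial user-space sketch of that early-deny shape, with stand-in names, is:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool restrict_dmesg = true;                   /* stands in for grsec_enable_dmesg   */

static bool caller_is_admin(void) { return false; }  /* stands in for capable(CAP_SYS_ADMIN) */

static int do_syslog_checked(void)
{
    if (restrict_dmesg && !caller_is_admin())
        return -EPERM;               /* early deny, before the usual security hook */
    return 0;                        /* fall through to the normal checks */
}

int main(void)
{
    printf("do_syslog() -> %d\n", do_syslog_checked());
    return 0;
}
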
64641diff -urNp linux-2.6.32.46/kernel/profile.c linux-2.6.32.46/kernel/profile.c
64642--- linux-2.6.32.46/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
64643+++ linux-2.6.32.46/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
64644@@ -39,7 +39,7 @@ struct profile_hit {
64645 /* Oprofile timer tick hook */
64646 static int (*timer_hook)(struct pt_regs *) __read_mostly;
64647
64648-static atomic_t *prof_buffer;
64649+static atomic_unchecked_t *prof_buffer;
64650 static unsigned long prof_len, prof_shift;
64651
64652 int prof_on __read_mostly;
64653@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
64654 hits[i].pc = 0;
64655 continue;
64656 }
64657- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64658+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64659 hits[i].hits = hits[i].pc = 0;
64660 }
64661 }
64662@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
64663 * Add the current hit(s) and flush the write-queue out
64664 * to the global buffer:
64665 */
64666- atomic_add(nr_hits, &prof_buffer[pc]);
64667+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
64668 for (i = 0; i < NR_PROFILE_HIT; ++i) {
64669- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
64670+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
64671 hits[i].pc = hits[i].hits = 0;
64672 }
64673 out:
64674@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
64675 if (prof_on != type || !prof_buffer)
64676 return;
64677 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
64678- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64679+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
64680 }
64681 #endif /* !CONFIG_SMP */
64682 EXPORT_SYMBOL_GPL(profile_hits);
64683@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
64684 return -EFAULT;
64685 buf++; p++; count--; read++;
64686 }
64687- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
64688+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
64689 if (copy_to_user(buf, (void *)pnt, count))
64690 return -EFAULT;
64691 read += count;
64692@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
64693 }
64694 #endif
64695 profile_discard_flip_buffers();
64696- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
64697+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
64698 return count;
64699 }
64700
64701diff -urNp linux-2.6.32.46/kernel/ptrace.c linux-2.6.32.46/kernel/ptrace.c
64702--- linux-2.6.32.46/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
64703+++ linux-2.6.32.46/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
64704@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
64705 return ret;
64706 }
64707
64708-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
64709+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
64710+ unsigned int log)
64711 {
64712 const struct cred *cred = current_cred(), *tcred;
64713
64714@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
64715 cred->gid != tcred->egid ||
64716 cred->gid != tcred->sgid ||
64717 cred->gid != tcred->gid) &&
64718- !capable(CAP_SYS_PTRACE)) {
64719+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64720+ (log && !capable(CAP_SYS_PTRACE)))
64721+ ) {
64722 rcu_read_unlock();
64723 return -EPERM;
64724 }
64725@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
64726 smp_rmb();
64727 if (task->mm)
64728 dumpable = get_dumpable(task->mm);
64729- if (!dumpable && !capable(CAP_SYS_PTRACE))
64730+ if (!dumpable &&
64731+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
64732+ (log && !capable(CAP_SYS_PTRACE))))
64733 return -EPERM;
64734
64735 return security_ptrace_access_check(task, mode);
64736@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
64737 {
64738 int err;
64739 task_lock(task);
64740- err = __ptrace_may_access(task, mode);
64741+ err = __ptrace_may_access(task, mode, 0);
64742+ task_unlock(task);
64743+ return !err;
64744+}
64745+
64746+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
64747+{
64748+ int err;
64749+ task_lock(task);
64750+ err = __ptrace_may_access(task, mode, 1);
64751 task_unlock(task);
64752 return !err;
64753 }
64754@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
64755 goto out;
64756
64757 task_lock(task);
64758- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
64759+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
64760 task_unlock(task);
64761 if (retval)
64762 goto unlock_creds;
64763@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
64764 goto unlock_tasklist;
64765
64766 task->ptrace = PT_PTRACED;
64767- if (capable(CAP_SYS_PTRACE))
64768+ if (capable_nolog(CAP_SYS_PTRACE))
64769 task->ptrace |= PT_PTRACE_CAP;
64770
64771 __ptrace_link(task, current);
64772@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
64773 {
64774 int copied = 0;
64775
64776+ pax_track_stack();
64777+
64778 while (len > 0) {
64779 char buf[128];
64780 int this_len, retval;
64781@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
64782 {
64783 int copied = 0;
64784
64785+ pax_track_stack();
64786+
64787 while (len > 0) {
64788 char buf[128];
64789 int this_len, retval;
64790@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
64791 int ret = -EIO;
64792 siginfo_t siginfo;
64793
64794+ pax_track_stack();
64795+
64796 switch (request) {
64797 case PTRACE_PEEKTEXT:
64798 case PTRACE_PEEKDATA:
64799@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
64800 ret = ptrace_setoptions(child, data);
64801 break;
64802 case PTRACE_GETEVENTMSG:
64803- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
64804+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
64805 break;
64806
64807 case PTRACE_GETSIGINFO:
64808 ret = ptrace_getsiginfo(child, &siginfo);
64809 if (!ret)
64810- ret = copy_siginfo_to_user((siginfo_t __user *) data,
64811+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
64812 &siginfo);
64813 break;
64814
64815 case PTRACE_SETSIGINFO:
64816- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
64817+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
64818 sizeof siginfo))
64819 ret = -EFAULT;
64820 else
64821@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
64822 goto out;
64823 }
64824
64825+ if (gr_handle_ptrace(child, request)) {
64826+ ret = -EPERM;
64827+ goto out_put_task_struct;
64828+ }
64829+
64830 if (request == PTRACE_ATTACH) {
64831 ret = ptrace_attach(child);
64832 /*
64833 * Some architectures need to do book-keeping after
64834 * a ptrace attach.
64835 */
64836- if (!ret)
64837+ if (!ret) {
64838 arch_ptrace_attach(child);
64839+ gr_audit_ptrace(child);
64840+ }
64841 goto out_put_task_struct;
64842 }
64843
64844@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
64845 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
64846 if (copied != sizeof(tmp))
64847 return -EIO;
64848- return put_user(tmp, (unsigned long __user *)data);
64849+ return put_user(tmp, (__force unsigned long __user *)data);
64850 }
64851
64852 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
64853@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
64854 siginfo_t siginfo;
64855 int ret;
64856
64857+ pax_track_stack();
64858+
64859 switch (request) {
64860 case PTRACE_PEEKTEXT:
64861 case PTRACE_PEEKDATA:
64862@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
64863 goto out;
64864 }
64865
64866+ if (gr_handle_ptrace(child, request)) {
64867+ ret = -EPERM;
64868+ goto out_put_task_struct;
64869+ }
64870+
64871 if (request == PTRACE_ATTACH) {
64872 ret = ptrace_attach(child);
64873 /*
64874 * Some architectures need to do book-keeping after
64875 * a ptrace attach.
64876 */
64877- if (!ret)
64878+ if (!ret) {
64879 arch_ptrace_attach(child);
64880+ gr_audit_ptrace(child);
64881+ }
64882 goto out_put_task_struct;
64883 }
64884
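
The kernel/ptrace.c changes above give __ptrace_may_access() a log parameter so that internal probes can use capable_nolog() while the attach path still produces an audit record on denial, and they hook PTRACE_ATTACH with gr_handle_ptrace()/gr_audit_ptrace(). The sketch below models only the "logged versus silent permission check" split in user space; the cap_check_* and may_access_* names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

static bool allowed;   /* pretend policy state */

static bool cap_check_logged(void)
{
    if (!allowed)
        fprintf(stderr, "audit: ptrace denied\n");   /* like capable(CAP_SYS_PTRACE) */
    return allowed;
}

static bool cap_check_silent(void)
{
    return allowed;                                  /* like capable_nolog(CAP_SYS_PTRACE) */
}

static int may_access(bool log)
{
    bool ok = log ? cap_check_logged() : cap_check_silent();

    return ok ? 0 : -1;
}

/* internal callers probe quietly... */
static bool may_access_quiet(void)  { return may_access(false) == 0; }
/* ...while the attach path produces an audit record on denial */
static bool may_access_logged(void) { return may_access(true) == 0; }

int main(void)
{
    allowed = false;
    printf("quiet:  %d\n", may_access_quiet());
    printf("logged: %d\n", may_access_logged());
    return 0;
}
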
64885diff -urNp linux-2.6.32.46/kernel/rcutorture.c linux-2.6.32.46/kernel/rcutorture.c
64886--- linux-2.6.32.46/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
64887+++ linux-2.6.32.46/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
64888@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
64889 { 0 };
64890 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
64891 { 0 };
64892-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64893-static atomic_t n_rcu_torture_alloc;
64894-static atomic_t n_rcu_torture_alloc_fail;
64895-static atomic_t n_rcu_torture_free;
64896-static atomic_t n_rcu_torture_mberror;
64897-static atomic_t n_rcu_torture_error;
64898+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
64899+static atomic_unchecked_t n_rcu_torture_alloc;
64900+static atomic_unchecked_t n_rcu_torture_alloc_fail;
64901+static atomic_unchecked_t n_rcu_torture_free;
64902+static atomic_unchecked_t n_rcu_torture_mberror;
64903+static atomic_unchecked_t n_rcu_torture_error;
64904 static long n_rcu_torture_timers;
64905 static struct list_head rcu_torture_removed;
64906 static cpumask_var_t shuffle_tmp_mask;
64907@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
64908
64909 spin_lock_bh(&rcu_torture_lock);
64910 if (list_empty(&rcu_torture_freelist)) {
64911- atomic_inc(&n_rcu_torture_alloc_fail);
64912+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
64913 spin_unlock_bh(&rcu_torture_lock);
64914 return NULL;
64915 }
64916- atomic_inc(&n_rcu_torture_alloc);
64917+ atomic_inc_unchecked(&n_rcu_torture_alloc);
64918 p = rcu_torture_freelist.next;
64919 list_del_init(p);
64920 spin_unlock_bh(&rcu_torture_lock);
64921@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
64922 static void
64923 rcu_torture_free(struct rcu_torture *p)
64924 {
64925- atomic_inc(&n_rcu_torture_free);
64926+ atomic_inc_unchecked(&n_rcu_torture_free);
64927 spin_lock_bh(&rcu_torture_lock);
64928 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
64929 spin_unlock_bh(&rcu_torture_lock);
64930@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
64931 i = rp->rtort_pipe_count;
64932 if (i > RCU_TORTURE_PIPE_LEN)
64933 i = RCU_TORTURE_PIPE_LEN;
64934- atomic_inc(&rcu_torture_wcount[i]);
64935+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64936 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64937 rp->rtort_mbtest = 0;
64938 rcu_torture_free(rp);
64939@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
64940 i = rp->rtort_pipe_count;
64941 if (i > RCU_TORTURE_PIPE_LEN)
64942 i = RCU_TORTURE_PIPE_LEN;
64943- atomic_inc(&rcu_torture_wcount[i]);
64944+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64945 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
64946 rp->rtort_mbtest = 0;
64947 list_del(&rp->rtort_free);
64948@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
64949 i = old_rp->rtort_pipe_count;
64950 if (i > RCU_TORTURE_PIPE_LEN)
64951 i = RCU_TORTURE_PIPE_LEN;
64952- atomic_inc(&rcu_torture_wcount[i]);
64953+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
64954 old_rp->rtort_pipe_count++;
64955 cur_ops->deferred_free(old_rp);
64956 }
64957@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
64958 return;
64959 }
64960 if (p->rtort_mbtest == 0)
64961- atomic_inc(&n_rcu_torture_mberror);
64962+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64963 spin_lock(&rand_lock);
64964 cur_ops->read_delay(&rand);
64965 n_rcu_torture_timers++;
64966@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
64967 continue;
64968 }
64969 if (p->rtort_mbtest == 0)
64970- atomic_inc(&n_rcu_torture_mberror);
64971+ atomic_inc_unchecked(&n_rcu_torture_mberror);
64972 cur_ops->read_delay(&rand);
64973 preempt_disable();
64974 pipe_count = p->rtort_pipe_count;
64975@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
64976 rcu_torture_current,
64977 rcu_torture_current_version,
64978 list_empty(&rcu_torture_freelist),
64979- atomic_read(&n_rcu_torture_alloc),
64980- atomic_read(&n_rcu_torture_alloc_fail),
64981- atomic_read(&n_rcu_torture_free),
64982- atomic_read(&n_rcu_torture_mberror),
64983+ atomic_read_unchecked(&n_rcu_torture_alloc),
64984+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
64985+ atomic_read_unchecked(&n_rcu_torture_free),
64986+ atomic_read_unchecked(&n_rcu_torture_mberror),
64987 n_rcu_torture_timers);
64988- if (atomic_read(&n_rcu_torture_mberror) != 0)
64989+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
64990 cnt += sprintf(&page[cnt], " !!!");
64991 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
64992 if (i > 1) {
64993 cnt += sprintf(&page[cnt], "!!! ");
64994- atomic_inc(&n_rcu_torture_error);
64995+ atomic_inc_unchecked(&n_rcu_torture_error);
64996 WARN_ON_ONCE(1);
64997 }
64998 cnt += sprintf(&page[cnt], "Reader Pipe: ");
64999@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65000 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65001 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65002 cnt += sprintf(&page[cnt], " %d",
65003- atomic_read(&rcu_torture_wcount[i]));
65004+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65005 }
65006 cnt += sprintf(&page[cnt], "\n");
65007 if (cur_ops->stats)
65008@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65009
65010 if (cur_ops->cleanup)
65011 cur_ops->cleanup();
65012- if (atomic_read(&n_rcu_torture_error))
65013+ if (atomic_read_unchecked(&n_rcu_torture_error))
65014 rcu_torture_print_module_parms("End of test: FAILURE");
65015 else
65016 rcu_torture_print_module_parms("End of test: SUCCESS");
65017@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65018
65019 rcu_torture_current = NULL;
65020 rcu_torture_current_version = 0;
65021- atomic_set(&n_rcu_torture_alloc, 0);
65022- atomic_set(&n_rcu_torture_alloc_fail, 0);
65023- atomic_set(&n_rcu_torture_free, 0);
65024- atomic_set(&n_rcu_torture_mberror, 0);
65025- atomic_set(&n_rcu_torture_error, 0);
65026+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65027+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65028+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65029+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65030+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65031 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65032- atomic_set(&rcu_torture_wcount[i], 0);
65033+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65034 for_each_possible_cpu(cpu) {
65035 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65036 per_cpu(rcu_torture_count, cpu)[i] = 0;
65037diff -urNp linux-2.6.32.46/kernel/rcutree.c linux-2.6.32.46/kernel/rcutree.c
65038--- linux-2.6.32.46/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65039+++ linux-2.6.32.46/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65040@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65041 /*
65042 * Do softirq processing for the current CPU.
65043 */
65044-static void rcu_process_callbacks(struct softirq_action *unused)
65045+static void rcu_process_callbacks(void)
65046 {
65047 /*
65048 * Memory references from any prior RCU read-side critical sections
65049diff -urNp linux-2.6.32.46/kernel/rcutree_plugin.h linux-2.6.32.46/kernel/rcutree_plugin.h
65050--- linux-2.6.32.46/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65051+++ linux-2.6.32.46/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65052@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65053 */
65054 void __rcu_read_lock(void)
65055 {
65056- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65057+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65058 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65059 }
65060 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65061@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65062 struct task_struct *t = current;
65063
65064 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65065- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65066+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65067 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65068 rcu_read_unlock_special(t);
65069 }
65070diff -urNp linux-2.6.32.46/kernel/relay.c linux-2.6.32.46/kernel/relay.c
65071--- linux-2.6.32.46/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65072+++ linux-2.6.32.46/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65073@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65074 unsigned int flags,
65075 int *nonpad_ret)
65076 {
65077- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65078+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65079 struct rchan_buf *rbuf = in->private_data;
65080 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65081 uint64_t pos = (uint64_t) *ppos;
65082@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65083 .ops = &relay_pipe_buf_ops,
65084 .spd_release = relay_page_release,
65085 };
65086+ ssize_t ret;
65087+
65088+ pax_track_stack();
65089
65090 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65091 return 0;
65092diff -urNp linux-2.6.32.46/kernel/resource.c linux-2.6.32.46/kernel/resource.c
65093--- linux-2.6.32.46/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65094+++ linux-2.6.32.46/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65095@@ -132,8 +132,18 @@ static const struct file_operations proc
65096
65097 static int __init ioresources_init(void)
65098 {
65099+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65100+#ifdef CONFIG_GRKERNSEC_PROC_USER
65101+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65102+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65103+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65104+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65105+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65106+#endif
65107+#else
65108 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65109 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65110+#endif
65111 return 0;
65112 }
65113 __initcall(ioresources_init);
65114diff -urNp linux-2.6.32.46/kernel/rtmutex.c linux-2.6.32.46/kernel/rtmutex.c
65115--- linux-2.6.32.46/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65116+++ linux-2.6.32.46/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65117@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65118 */
65119 spin_lock_irqsave(&pendowner->pi_lock, flags);
65120
65121- WARN_ON(!pendowner->pi_blocked_on);
65122+ BUG_ON(!pendowner->pi_blocked_on);
65123 WARN_ON(pendowner->pi_blocked_on != waiter);
65124 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65125
65126diff -urNp linux-2.6.32.46/kernel/rtmutex-tester.c linux-2.6.32.46/kernel/rtmutex-tester.c
65127--- linux-2.6.32.46/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65128+++ linux-2.6.32.46/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65129@@ -21,7 +21,7 @@
65130 #define MAX_RT_TEST_MUTEXES 8
65131
65132 static spinlock_t rttest_lock;
65133-static atomic_t rttest_event;
65134+static atomic_unchecked_t rttest_event;
65135
65136 struct test_thread_data {
65137 int opcode;
65138@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65139
65140 case RTTEST_LOCKCONT:
65141 td->mutexes[td->opdata] = 1;
65142- td->event = atomic_add_return(1, &rttest_event);
65143+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65144 return 0;
65145
65146 case RTTEST_RESET:
65147@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65148 return 0;
65149
65150 case RTTEST_RESETEVENT:
65151- atomic_set(&rttest_event, 0);
65152+ atomic_set_unchecked(&rttest_event, 0);
65153 return 0;
65154
65155 default:
65156@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65157 return ret;
65158
65159 td->mutexes[id] = 1;
65160- td->event = atomic_add_return(1, &rttest_event);
65161+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65162 rt_mutex_lock(&mutexes[id]);
65163- td->event = atomic_add_return(1, &rttest_event);
65164+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65165 td->mutexes[id] = 4;
65166 return 0;
65167
65168@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65169 return ret;
65170
65171 td->mutexes[id] = 1;
65172- td->event = atomic_add_return(1, &rttest_event);
65173+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65174 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65175- td->event = atomic_add_return(1, &rttest_event);
65176+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65177 td->mutexes[id] = ret ? 0 : 4;
65178 return ret ? -EINTR : 0;
65179
65180@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65181 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65182 return ret;
65183
65184- td->event = atomic_add_return(1, &rttest_event);
65185+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65186 rt_mutex_unlock(&mutexes[id]);
65187- td->event = atomic_add_return(1, &rttest_event);
65188+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65189 td->mutexes[id] = 0;
65190 return 0;
65191
65192@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65193 break;
65194
65195 td->mutexes[dat] = 2;
65196- td->event = atomic_add_return(1, &rttest_event);
65197+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65198 break;
65199
65200 case RTTEST_LOCKBKL:
65201@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65202 return;
65203
65204 td->mutexes[dat] = 3;
65205- td->event = atomic_add_return(1, &rttest_event);
65206+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65207 break;
65208
65209 case RTTEST_LOCKNOWAIT:
65210@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65211 return;
65212
65213 td->mutexes[dat] = 1;
65214- td->event = atomic_add_return(1, &rttest_event);
65215+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65216 return;
65217
65218 case RTTEST_LOCKBKL:
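The atomic_t conversions in rtmutex-tester.c (and in the timer and trace hunks further down) all follow one pattern: counters that are pure event statistics, where wrap-around is harmless, are moved to the unchecked atomic API so that PaX's reference-count overflow detection only guards real reference counts. A minimal usage sketch, assuming the atomic_unchecked_t type and operations introduced by the arch-specific parts of this patch:

/* event counter: wrapping is harmless, so overflow trapping is opted out */
static atomic_unchecked_t rttest_event;

static int record_event(void)
{
        /* same semantics as atomic_add_return(), minus the overflow check */
        return atomic_add_return_unchecked(1, &rttest_event);
}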
65219diff -urNp linux-2.6.32.46/kernel/sched.c linux-2.6.32.46/kernel/sched.c
65220--- linux-2.6.32.46/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65221+++ linux-2.6.32.46/kernel/sched.c 2011-08-21 19:29:25.000000000 -0400
65222@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
65223 {
65224 unsigned long flags;
65225 struct rq *rq;
65226- int cpu = get_cpu();
65227
65228 #ifdef CONFIG_SMP
65229+ int cpu = get_cpu();
65230+
65231 rq = task_rq_lock(p, &flags);
65232 p->state = TASK_WAKING;
65233
65234@@ -5043,7 +5044,7 @@ out:
65235 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65236 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65237 */
65238-static void run_rebalance_domains(struct softirq_action *h)
65239+static void run_rebalance_domains(void)
65240 {
65241 int this_cpu = smp_processor_id();
65242 struct rq *this_rq = cpu_rq(this_cpu);
65243@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
65244 struct rq *rq;
65245 int cpu;
65246
65247+ pax_track_stack();
65248+
65249 need_resched:
65250 preempt_disable();
65251 cpu = smp_processor_id();
65252@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
65253 * Look out! "owner" is an entirely speculative pointer
65254 * access and not reliable.
65255 */
65256-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65257+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65258 {
65259 unsigned int cpu;
65260 struct rq *rq;
65261@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
65262 * DEBUG_PAGEALLOC could have unmapped it if
65263 * the mutex owner just released it and exited.
65264 */
65265- if (probe_kernel_address(&owner->cpu, cpu))
65266+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65267 return 0;
65268 #else
65269- cpu = owner->cpu;
65270+ cpu = task_thread_info(owner)->cpu;
65271 #endif
65272
65273 /*
65274@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
65275 /*
65276 * Is that owner really running on that cpu?
65277 */
65278- if (task_thread_info(rq->curr) != owner || need_resched())
65279+ if (rq->curr != owner || need_resched())
65280 return 0;
65281
65282 cpu_relax();
65283@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
65284 /* convert nice value [19,-20] to rlimit style value [1,40] */
65285 int nice_rlim = 20 - nice;
65286
65287+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65288+
65289 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65290 capable(CAP_SYS_NICE));
65291 }
65292@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65293 if (nice > 19)
65294 nice = 19;
65295
65296- if (increment < 0 && !can_nice(current, nice))
65297+ if (increment < 0 && (!can_nice(current, nice) ||
65298+ gr_handle_chroot_nice()))
65299 return -EPERM;
65300
65301 retval = security_task_setnice(current, nice);
65302@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
65303 long power;
65304 int weight;
65305
65306- WARN_ON(!sd || !sd->groups);
65307+ BUG_ON(!sd || !sd->groups);
65308
65309 if (cpu != group_first_cpu(sd->groups))
65310 return;
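Besides the grsecurity hooks (RLIMIT_NICE learning and the chroot nice restriction), the sched.c hunks re-type mutex_spin_on_owner() to take a task_struct, matching the mutex owner field being re-typed elsewhere in the patch: the owner's CPU is now read through task_thread_info(owner)->cpu, and the "still running" test compares task pointers directly. A sketch of that test, with the helper name purely illustrative:

/* owner is a task_struct now, so compare it against rq->curr directly */
static inline int owner_still_running(struct rq *rq, struct task_struct *owner)
{
        return rq->curr == owner && !need_resched();
}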
65311diff -urNp linux-2.6.32.46/kernel/signal.c linux-2.6.32.46/kernel/signal.c
65312--- linux-2.6.32.46/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65313+++ linux-2.6.32.46/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65314@@ -41,12 +41,12 @@
65315
65316 static struct kmem_cache *sigqueue_cachep;
65317
65318-static void __user *sig_handler(struct task_struct *t, int sig)
65319+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65320 {
65321 return t->sighand->action[sig - 1].sa.sa_handler;
65322 }
65323
65324-static int sig_handler_ignored(void __user *handler, int sig)
65325+static int sig_handler_ignored(__sighandler_t handler, int sig)
65326 {
65327 /* Is it explicitly or implicitly ignored? */
65328 return handler == SIG_IGN ||
65329@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65330 static int sig_task_ignored(struct task_struct *t, int sig,
65331 int from_ancestor_ns)
65332 {
65333- void __user *handler;
65334+ __sighandler_t handler;
65335
65336 handler = sig_handler(t, sig);
65337
65338@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65339 */
65340 user = get_uid(__task_cred(t)->user);
65341 atomic_inc(&user->sigpending);
65342+
65343+ if (!override_rlimit)
65344+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65345 if (override_rlimit ||
65346 atomic_read(&user->sigpending) <=
65347 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65348@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65349
65350 int unhandled_signal(struct task_struct *tsk, int sig)
65351 {
65352- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65353+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65354 if (is_global_init(tsk))
65355 return 1;
65356 if (handler != SIG_IGN && handler != SIG_DFL)
65357@@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65358 }
65359 }
65360
65361+ /* allow glibc communication via tgkill to other threads in our
65362+ thread group */
65363+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65364+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65365+ && gr_handle_signal(t, sig))
65366+ return -EPERM;
65367+
65368 return security_task_kill(t, info, sig, 0);
65369 }
65370
65371@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65372 return send_signal(sig, info, p, 1);
65373 }
65374
65375-static int
65376+int
65377 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65378 {
65379 return send_signal(sig, info, t, 0);
65380@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65381 unsigned long int flags;
65382 int ret, blocked, ignored;
65383 struct k_sigaction *action;
65384+ int is_unhandled = 0;
65385
65386 spin_lock_irqsave(&t->sighand->siglock, flags);
65387 action = &t->sighand->action[sig-1];
65388@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65389 }
65390 if (action->sa.sa_handler == SIG_DFL)
65391 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65392+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65393+ is_unhandled = 1;
65394 ret = specific_send_sig_info(sig, info, t);
65395 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65396
65397+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
65398+ normal operation */
65399+ if (is_unhandled) {
65400+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65401+ gr_handle_crash(t, sig);
65402+ }
65403+
65404 return ret;
65405 }
65406
65407@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65408 {
65409 int ret = check_kill_permission(sig, info, p);
65410
65411- if (!ret && sig)
65412+ if (!ret && sig) {
65413 ret = do_send_sig_info(sig, info, p, true);
65414+ if (!ret)
65415+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65416+ }
65417
65418 return ret;
65419 }
65420@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65421 {
65422 siginfo_t info;
65423
65424+ pax_track_stack();
65425+
65426 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65427
65428 memset(&info, 0, sizeof info);
65429@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65430 int error = -ESRCH;
65431
65432 rcu_read_lock();
65433- p = find_task_by_vpid(pid);
65434+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65435+ /* allow glibc communication via tgkill to other threads in our
65436+ thread group */
65437+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65438+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65439+ p = find_task_by_vpid_unrestricted(pid);
65440+ else
65441+#endif
65442+ p = find_task_by_vpid(pid);
65443 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65444 error = check_kill_permission(sig, info, p);
65445 /*
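In signal.c, force_sig_info() now records whether the signal had no user handler (SIG_IGN or SIG_DFL) before delivery and only then logs it and calls gr_handle_crash(), so runtimes such as the JVM that intentionally take SIGSEGV during normal operation are not treated as crashing. The do_send_specific() change carves out a narrow exception so glibc's internal SIGRTMIN+1 tgkill between threads of the same process keeps working under GRKERNSEC_CHROOT_FINDTASK. The handler test reduces to this sketch (helper name illustrative):

/* a signal is a crash indicator only if userland installed no handler */
static inline int sig_is_unhandled(const struct k_sigaction *action)
{
        return action->sa.sa_handler == SIG_IGN ||
               action->sa.sa_handler == SIG_DFL;
}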
65446diff -urNp linux-2.6.32.46/kernel/smp.c linux-2.6.32.46/kernel/smp.c
65447--- linux-2.6.32.46/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65448+++ linux-2.6.32.46/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65449@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65450 }
65451 EXPORT_SYMBOL(smp_call_function);
65452
65453-void ipi_call_lock(void)
65454+void ipi_call_lock(void) __acquires(call_function.lock)
65455 {
65456 spin_lock(&call_function.lock);
65457 }
65458
65459-void ipi_call_unlock(void)
65460+void ipi_call_unlock(void) __releases(call_function.lock)
65461 {
65462 spin_unlock(&call_function.lock);
65463 }
65464
65465-void ipi_call_lock_irq(void)
65466+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65467 {
65468 spin_lock_irq(&call_function.lock);
65469 }
65470
65471-void ipi_call_unlock_irq(void)
65472+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65473 {
65474 spin_unlock_irq(&call_function.lock);
65475 }
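The smp.c hunk adds no logic, only sparse context annotations: __acquires() and __releases() tell the sparse checker that these helpers intentionally return with call_function.lock held (or expect it held on entry), silencing context-imbalance warnings. The general form of the annotation, as a self-contained sketch:

#include <linux/spinlock.h>

/* the function takes the lock and deliberately returns with it held */
static void take_it(spinlock_t *lock) __acquires(lock)
{
        spin_lock(lock);
}

/* the matching helper expects the lock held on entry and releases it */
static void drop_it(spinlock_t *lock) __releases(lock)
{
        spin_unlock(lock);
}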
65476diff -urNp linux-2.6.32.46/kernel/softirq.c linux-2.6.32.46/kernel/softirq.c
65477--- linux-2.6.32.46/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65478+++ linux-2.6.32.46/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65479@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65480
65481 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65482
65483-char *softirq_to_name[NR_SOFTIRQS] = {
65484+const char * const softirq_to_name[NR_SOFTIRQS] = {
65485 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65486 "TASKLET", "SCHED", "HRTIMER", "RCU"
65487 };
65488@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65489
65490 asmlinkage void __do_softirq(void)
65491 {
65492- struct softirq_action *h;
65493+ const struct softirq_action *h;
65494 __u32 pending;
65495 int max_restart = MAX_SOFTIRQ_RESTART;
65496 int cpu;
65497@@ -233,7 +233,7 @@ restart:
65498 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65499
65500 trace_softirq_entry(h, softirq_vec);
65501- h->action(h);
65502+ h->action();
65503 trace_softirq_exit(h, softirq_vec);
65504 if (unlikely(prev_count != preempt_count())) {
65505 printk(KERN_ERR "huh, entered softirq %td %s %p"
65506@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65507 local_irq_restore(flags);
65508 }
65509
65510-void open_softirq(int nr, void (*action)(struct softirq_action *))
65511+void open_softirq(int nr, void (*action)(void))
65512 {
65513- softirq_vec[nr].action = action;
65514+ pax_open_kernel();
65515+ *(void **)&softirq_vec[nr].action = action;
65516+ pax_close_kernel();
65517 }
65518
65519 /*
65520@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65521
65522 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65523
65524-static void tasklet_action(struct softirq_action *a)
65525+static void tasklet_action(void)
65526 {
65527 struct tasklet_struct *list;
65528
65529@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65530 }
65531 }
65532
65533-static void tasklet_hi_action(struct softirq_action *a)
65534+static void tasklet_hi_action(void)
65535 {
65536 struct tasklet_struct *list;
65537
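With the handlers dropping their unused struct softirq_action argument, the softirq vector can be made read-only elsewhere in the patch; open_softirq() therefore lifts kernel write protection with pax_open_kernel()/pax_close_kernel() around the single pointer store. The registration pattern, as it looks after this change:

/* store into the (now read-only) vector while writes are briefly enabled */
void open_softirq(int nr, void (*action)(void))
{
        pax_open_kernel();
        *(void **)&softirq_vec[nr].action = action;
        pax_close_kernel();
}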
65538diff -urNp linux-2.6.32.46/kernel/sys.c linux-2.6.32.46/kernel/sys.c
65539--- linux-2.6.32.46/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65540+++ linux-2.6.32.46/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65541@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65542 error = -EACCES;
65543 goto out;
65544 }
65545+
65546+ if (gr_handle_chroot_setpriority(p, niceval)) {
65547+ error = -EACCES;
65548+ goto out;
65549+ }
65550+
65551 no_nice = security_task_setnice(p, niceval);
65552 if (no_nice) {
65553 error = no_nice;
65554@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
65555 !(user = find_user(who)))
65556 goto out_unlock; /* No processes for this user */
65557
65558- do_each_thread(g, p)
65559+ do_each_thread(g, p) {
65560 if (__task_cred(p)->uid == who)
65561 error = set_one_prio(p, niceval, error);
65562- while_each_thread(g, p);
65563+ } while_each_thread(g, p);
65564 if (who != cred->uid)
65565 free_uid(user); /* For find_user() */
65566 break;
65567@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
65568 !(user = find_user(who)))
65569 goto out_unlock; /* No processes for this user */
65570
65571- do_each_thread(g, p)
65572+ do_each_thread(g, p) {
65573 if (__task_cred(p)->uid == who) {
65574 niceval = 20 - task_nice(p);
65575 if (niceval > retval)
65576 retval = niceval;
65577 }
65578- while_each_thread(g, p);
65579+ } while_each_thread(g, p);
65580 if (who != cred->uid)
65581 free_uid(user); /* for find_user() */
65582 break;
65583@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
65584 goto error;
65585 }
65586
65587+ if (gr_check_group_change(new->gid, new->egid, -1))
65588+ goto error;
65589+
65590 if (rgid != (gid_t) -1 ||
65591 (egid != (gid_t) -1 && egid != old->gid))
65592 new->sgid = new->egid;
65593@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
65594 goto error;
65595
65596 retval = -EPERM;
65597+
65598+ if (gr_check_group_change(gid, gid, gid))
65599+ goto error;
65600+
65601 if (capable(CAP_SETGID))
65602 new->gid = new->egid = new->sgid = new->fsgid = gid;
65603 else if (gid == old->gid || gid == old->sgid)
65604@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
65605 if (!new_user)
65606 return -EAGAIN;
65607
65608+ /*
65609+ * We don't fail in case of NPROC limit excess here because too many
65610+ * poorly written programs don't check set*uid() return code, assuming
65611+ * it never fails if called by root. We may still enforce NPROC limit
65612+ * for programs doing set*uid()+execve() by harmlessly deferring the
65613+ * failure to the execve() stage.
65614+ */
65615 if (atomic_read(&new_user->processes) >=
65616 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
65617- new_user != INIT_USER) {
65618- free_uid(new_user);
65619- return -EAGAIN;
65620- }
65621+ new_user != INIT_USER)
65622+ current->flags |= PF_NPROC_EXCEEDED;
65623+ else
65624+ current->flags &= ~PF_NPROC_EXCEEDED;
65625
65626 free_uid(new->user);
65627 new->user = new_user;
65628@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
65629 goto error;
65630 }
65631
65632+ if (gr_check_user_change(new->uid, new->euid, -1))
65633+ goto error;
65634+
65635 if (new->uid != old->uid) {
65636 retval = set_user(new);
65637 if (retval < 0)
65638@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
65639 goto error;
65640
65641 retval = -EPERM;
65642+
65643+ if (gr_check_crash_uid(uid))
65644+ goto error;
65645+ if (gr_check_user_change(uid, uid, uid))
65646+ goto error;
65647+
65648 if (capable(CAP_SETUID)) {
65649 new->suid = new->uid = uid;
65650 if (uid != old->uid) {
65651@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
65652 goto error;
65653 }
65654
65655+ if (gr_check_user_change(ruid, euid, -1))
65656+ goto error;
65657+
65658 if (ruid != (uid_t) -1) {
65659 new->uid = ruid;
65660 if (ruid != old->uid) {
65661@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
65662 goto error;
65663 }
65664
65665+ if (gr_check_group_change(rgid, egid, -1))
65666+ goto error;
65667+
65668 if (rgid != (gid_t) -1)
65669 new->gid = rgid;
65670 if (egid != (gid_t) -1)
65671@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
65672 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
65673 goto error;
65674
65675+ if (gr_check_user_change(-1, -1, uid))
65676+ goto error;
65677+
65678 if (uid == old->uid || uid == old->euid ||
65679 uid == old->suid || uid == old->fsuid ||
65680 capable(CAP_SETUID)) {
65681@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
65682 if (gid == old->gid || gid == old->egid ||
65683 gid == old->sgid || gid == old->fsgid ||
65684 capable(CAP_SETGID)) {
65685+ if (gr_check_group_change(-1, -1, gid))
65686+ goto error;
65687+
65688 if (gid != old_fsgid) {
65689 new->fsgid = gid;
65690 goto change_okay;
65691@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
65692 error = get_dumpable(me->mm);
65693 break;
65694 case PR_SET_DUMPABLE:
65695- if (arg2 < 0 || arg2 > 1) {
65696+ if (arg2 > 1) {
65697 error = -EINVAL;
65698 break;
65699 }
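The set_user() change is the "defer the failure to execve()" approach described in its comment: exceeding RLIMIT_NPROC no longer makes set*uid() fail (a failure badly written daemons tend to ignore); it only raises PF_NPROC_EXCEEDED on the task, and the limit is enforced when the task next calls execve(). Restated as a small helper (name illustrative; PF_NPROC_EXCEEDED is provided elsewhere in the patch):

/* record the excess instead of failing set*uid() */
static void note_nproc_excess(struct user_struct *new_user)
{
        if (atomic_read(&new_user->processes) >=
            current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
            new_user != INIT_USER)
                current->flags |= PF_NPROC_EXCEEDED;
        else
                current->flags &= ~PF_NPROC_EXCEEDED;
}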
65700diff -urNp linux-2.6.32.46/kernel/sysctl.c linux-2.6.32.46/kernel/sysctl.c
65701--- linux-2.6.32.46/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
65702+++ linux-2.6.32.46/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
65703@@ -63,6 +63,13 @@
65704 static int deprecated_sysctl_warning(struct __sysctl_args *args);
65705
65706 #if defined(CONFIG_SYSCTL)
65707+#include <linux/grsecurity.h>
65708+#include <linux/grinternal.h>
65709+
65710+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
65711+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
65712+ const int op);
65713+extern int gr_handle_chroot_sysctl(const int op);
65714
65715 /* External variables not in a header file. */
65716 extern int C_A_D;
65717@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
65718 static int proc_taint(struct ctl_table *table, int write,
65719 void __user *buffer, size_t *lenp, loff_t *ppos);
65720 #endif
65721+extern ctl_table grsecurity_table[];
65722
65723 static struct ctl_table root_table[];
65724 static struct ctl_table_root sysctl_table_root;
65725@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
65726 int sysctl_legacy_va_layout;
65727 #endif
65728
65729+#ifdef CONFIG_PAX_SOFTMODE
65730+static ctl_table pax_table[] = {
65731+ {
65732+ .ctl_name = CTL_UNNUMBERED,
65733+ .procname = "softmode",
65734+ .data = &pax_softmode,
65735+ .maxlen = sizeof(unsigned int),
65736+ .mode = 0600,
65737+ .proc_handler = &proc_dointvec,
65738+ },
65739+
65740+ { .ctl_name = 0 }
65741+};
65742+#endif
65743+
65744 extern int prove_locking;
65745 extern int lock_stat;
65746
65747@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
65748 #endif
65749
65750 static struct ctl_table kern_table[] = {
65751+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
65752+ {
65753+ .ctl_name = CTL_UNNUMBERED,
65754+ .procname = "grsecurity",
65755+ .mode = 0500,
65756+ .child = grsecurity_table,
65757+ },
65758+#endif
65759+
65760+#ifdef CONFIG_PAX_SOFTMODE
65761+ {
65762+ .ctl_name = CTL_UNNUMBERED,
65763+ .procname = "pax",
65764+ .mode = 0500,
65765+ .child = pax_table,
65766+ },
65767+#endif
65768+
65769 {
65770 .ctl_name = CTL_UNNUMBERED,
65771 .procname = "sched_child_runs_first",
65772@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
65773 .data = &modprobe_path,
65774 .maxlen = KMOD_PATH_LEN,
65775 .mode = 0644,
65776- .proc_handler = &proc_dostring,
65777- .strategy = &sysctl_string,
65778+ .proc_handler = &proc_dostring_modpriv,
65779+ .strategy = &sysctl_string_modpriv,
65780 },
65781 {
65782 .ctl_name = CTL_UNNUMBERED,
65783@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
65784 .mode = 0644,
65785 .proc_handler = &proc_dointvec
65786 },
65787+ {
65788+ .procname = "heap_stack_gap",
65789+ .data = &sysctl_heap_stack_gap,
65790+ .maxlen = sizeof(sysctl_heap_stack_gap),
65791+ .mode = 0644,
65792+ .proc_handler = proc_doulongvec_minmax,
65793+ },
65794 #else
65795 {
65796 .ctl_name = CTL_UNNUMBERED,
65797@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
65798 return 0;
65799 }
65800
65801+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
65802+
65803 static int parse_table(int __user *name, int nlen,
65804 void __user *oldval, size_t __user *oldlenp,
65805 void __user *newval, size_t newlen,
65806@@ -1821,7 +1871,7 @@ repeat:
65807 if (n == table->ctl_name) {
65808 int error;
65809 if (table->child) {
65810- if (sysctl_perm(root, table, MAY_EXEC))
65811+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
65812 return -EPERM;
65813 name++;
65814 nlen--;
65815@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
65816 int error;
65817 int mode;
65818
65819+ if (table->parent != NULL && table->parent->procname != NULL &&
65820+ table->procname != NULL &&
65821+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
65822+ return -EACCES;
65823+ if (gr_handle_chroot_sysctl(op))
65824+ return -EACCES;
65825+ error = gr_handle_sysctl(table, op);
65826+ if (error)
65827+ return error;
65828+
65829+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65830+ if (error)
65831+ return error;
65832+
65833+ if (root->permissions)
65834+ mode = root->permissions(root, current->nsproxy, table);
65835+ else
65836+ mode = table->mode;
65837+
65838+ return test_perm(mode, op);
65839+}
65840+
65841+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
65842+{
65843+ int error;
65844+ int mode;
65845+
65846 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
65847 if (error)
65848 return error;
65849@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
65850 buffer, lenp, ppos);
65851 }
65852
65853+int proc_dostring_modpriv(struct ctl_table *table, int write,
65854+ void __user *buffer, size_t *lenp, loff_t *ppos)
65855+{
65856+ if (write && !capable(CAP_SYS_MODULE))
65857+ return -EPERM;
65858+
65859+ return _proc_do_string(table->data, table->maxlen, write,
65860+ buffer, lenp, ppos);
65861+}
65862+
65863
65864 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
65865 int *valp,
65866@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
65867 vleft = table->maxlen / sizeof(unsigned long);
65868 left = *lenp;
65869
65870- for (; left && vleft--; i++, min++, max++, first=0) {
65871+ for (; left && vleft--; i++, first=0) {
65872 if (write) {
65873 while (left) {
65874 char c;
65875@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
65876 return -ENOSYS;
65877 }
65878
65879+int proc_dostring_modpriv(struct ctl_table *table, int write,
65880+ void __user *buffer, size_t *lenp, loff_t *ppos)
65881+{
65882+ return -ENOSYS;
65883+}
65884+
65885 int proc_dointvec(struct ctl_table *table, int write,
65886 void __user *buffer, size_t *lenp, loff_t *ppos)
65887 {
65888@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
65889 return 1;
65890 }
65891
65892+int sysctl_string_modpriv(struct ctl_table *table,
65893+ void __user *oldval, size_t __user *oldlenp,
65894+ void __user *newval, size_t newlen)
65895+{
65896+ if (newval && newlen && !capable(CAP_SYS_MODULE))
65897+ return -EPERM;
65898+
65899+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
65900+}
65901+
65902 /*
65903 * This function makes sure that all of the integers in the vector
65904 * are between the minimum and maximum values given in the arrays
65905@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
65906 return -ENOSYS;
65907 }
65908
65909+int sysctl_string_modpriv(struct ctl_table *table,
65910+ void __user *oldval, size_t __user *oldlenp,
65911+ void __user *newval, size_t newlen)
65912+{
65913+ return -ENOSYS;
65914+}
65915+
65916 int sysctl_intvec(struct ctl_table *table,
65917 void __user *oldval, size_t __user *oldlenp,
65918 void __user *newval, size_t newlen)
65919@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
65920 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
65921 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
65922 EXPORT_SYMBOL(proc_dostring);
65923+EXPORT_SYMBOL(proc_dostring_modpriv);
65924 EXPORT_SYMBOL(proc_doulongvec_minmax);
65925 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
65926 EXPORT_SYMBOL(register_sysctl_table);
65927@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
65928 EXPORT_SYMBOL(sysctl_jiffies);
65929 EXPORT_SYMBOL(sysctl_ms_jiffies);
65930 EXPORT_SYMBOL(sysctl_string);
65931+EXPORT_SYMBOL(sysctl_string_modpriv);
65932 EXPORT_SYMBOL(sysctl_data);
65933 EXPORT_SYMBOL(unregister_sysctl_table);
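The sysctl.c hunks do three things: they register the grsecurity and pax sysctl trees, they introduce sysctl_perm_nochk() for directory traversal so the added checks are not repeated at every path component of a binary sysctl lookup, and they gate kernel.modprobe behind CAP_SYS_MODULE so modprobe_path can only be rewritten by a task that could load modules anyway. Sketch of the write gate (using the public proc_dostring() for brevity; the patch calls the internal _proc_do_string()):

/* writes require CAP_SYS_MODULE, reads pass through unchanged */
int proc_dostring_modpriv(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        if (write && !capable(CAP_SYS_MODULE))
                return -EPERM;
        return proc_dostring(table, write, buffer, lenp, ppos);
}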
65934diff -urNp linux-2.6.32.46/kernel/sysctl_check.c linux-2.6.32.46/kernel/sysctl_check.c
65935--- linux-2.6.32.46/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
65936+++ linux-2.6.32.46/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
65937@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
65938 } else {
65939 if ((table->strategy == sysctl_data) ||
65940 (table->strategy == sysctl_string) ||
65941+ (table->strategy == sysctl_string_modpriv) ||
65942 (table->strategy == sysctl_intvec) ||
65943 (table->strategy == sysctl_jiffies) ||
65944 (table->strategy == sysctl_ms_jiffies) ||
65945 (table->proc_handler == proc_dostring) ||
65946+ (table->proc_handler == proc_dostring_modpriv) ||
65947 (table->proc_handler == proc_dointvec) ||
65948 (table->proc_handler == proc_dointvec_minmax) ||
65949 (table->proc_handler == proc_dointvec_jiffies) ||
65950diff -urNp linux-2.6.32.46/kernel/taskstats.c linux-2.6.32.46/kernel/taskstats.c
65951--- linux-2.6.32.46/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
65952+++ linux-2.6.32.46/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
65953@@ -26,9 +26,12 @@
65954 #include <linux/cgroup.h>
65955 #include <linux/fs.h>
65956 #include <linux/file.h>
65957+#include <linux/grsecurity.h>
65958 #include <net/genetlink.h>
65959 #include <asm/atomic.h>
65960
65961+extern int gr_is_taskstats_denied(int pid);
65962+
65963 /*
65964 * Maximum length of a cpumask that can be specified in
65965 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
65966@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
65967 size_t size;
65968 cpumask_var_t mask;
65969
65970+ if (gr_is_taskstats_denied(current->pid))
65971+ return -EACCES;
65972+
65973 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
65974 return -ENOMEM;
65975
65976diff -urNp linux-2.6.32.46/kernel/time/tick-broadcast.c linux-2.6.32.46/kernel/time/tick-broadcast.c
65977--- linux-2.6.32.46/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
65978+++ linux-2.6.32.46/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
65979@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
65980 * then clear the broadcast bit.
65981 */
65982 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
65983- int cpu = smp_processor_id();
65984+ cpu = smp_processor_id();
65985
65986 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
65987 tick_broadcast_clear_oneshot(cpu);
65988diff -urNp linux-2.6.32.46/kernel/time/timekeeping.c linux-2.6.32.46/kernel/time/timekeeping.c
65989--- linux-2.6.32.46/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
65990+++ linux-2.6.32.46/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
65991@@ -14,6 +14,7 @@
65992 #include <linux/init.h>
65993 #include <linux/mm.h>
65994 #include <linux/sched.h>
65995+#include <linux/grsecurity.h>
65996 #include <linux/sysdev.h>
65997 #include <linux/clocksource.h>
65998 #include <linux/jiffies.h>
65999@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66000 */
66001 struct timespec ts = xtime;
66002 timespec_add_ns(&ts, nsec);
66003- ACCESS_ONCE(xtime_cache) = ts;
66004+ ACCESS_ONCE_RW(xtime_cache) = ts;
66005 }
66006
66007 /* must hold xtime_lock */
66008@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66009 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66010 return -EINVAL;
66011
66012+ gr_log_timechange();
66013+
66014 write_seqlock_irqsave(&xtime_lock, flags);
66015
66016 timekeeping_forward_now();
66017diff -urNp linux-2.6.32.46/kernel/time/timer_list.c linux-2.6.32.46/kernel/time/timer_list.c
66018--- linux-2.6.32.46/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66019+++ linux-2.6.32.46/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66020@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66021
66022 static void print_name_offset(struct seq_file *m, void *sym)
66023 {
66024+#ifdef CONFIG_GRKERNSEC_HIDESYM
66025+ SEQ_printf(m, "<%p>", NULL);
66026+#else
66027 char symname[KSYM_NAME_LEN];
66028
66029 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66030 SEQ_printf(m, "<%p>", sym);
66031 else
66032 SEQ_printf(m, "%s", symname);
66033+#endif
66034 }
66035
66036 static void
66037@@ -112,7 +116,11 @@ next_one:
66038 static void
66039 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66040 {
66041+#ifdef CONFIG_GRKERNSEC_HIDESYM
66042+ SEQ_printf(m, " .base: %p\n", NULL);
66043+#else
66044 SEQ_printf(m, " .base: %p\n", base);
66045+#endif
66046 SEQ_printf(m, " .index: %d\n",
66047 base->index);
66048 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66049@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66050 {
66051 struct proc_dir_entry *pe;
66052
66053+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66054+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66055+#else
66056 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66057+#endif
66058 if (!pe)
66059 return -ENOMEM;
66060 return 0;
66061diff -urNp linux-2.6.32.46/kernel/time/timer_stats.c linux-2.6.32.46/kernel/time/timer_stats.c
66062--- linux-2.6.32.46/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66063+++ linux-2.6.32.46/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66064@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66065 static unsigned long nr_entries;
66066 static struct entry entries[MAX_ENTRIES];
66067
66068-static atomic_t overflow_count;
66069+static atomic_unchecked_t overflow_count;
66070
66071 /*
66072 * The entries are in a hash-table, for fast lookup:
66073@@ -140,7 +140,7 @@ static void reset_entries(void)
66074 nr_entries = 0;
66075 memset(entries, 0, sizeof(entries));
66076 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66077- atomic_set(&overflow_count, 0);
66078+ atomic_set_unchecked(&overflow_count, 0);
66079 }
66080
66081 static struct entry *alloc_entry(void)
66082@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66083 if (likely(entry))
66084 entry->count++;
66085 else
66086- atomic_inc(&overflow_count);
66087+ atomic_inc_unchecked(&overflow_count);
66088
66089 out_unlock:
66090 spin_unlock_irqrestore(lock, flags);
66091@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66092
66093 static void print_name_offset(struct seq_file *m, unsigned long addr)
66094 {
66095+#ifdef CONFIG_GRKERNSEC_HIDESYM
66096+ seq_printf(m, "<%p>", NULL);
66097+#else
66098 char symname[KSYM_NAME_LEN];
66099
66100 if (lookup_symbol_name(addr, symname) < 0)
66101 seq_printf(m, "<%p>", (void *)addr);
66102 else
66103 seq_printf(m, "%s", symname);
66104+#endif
66105 }
66106
66107 static int tstats_show(struct seq_file *m, void *v)
66108@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66109
66110 seq_puts(m, "Timer Stats Version: v0.2\n");
66111 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66112- if (atomic_read(&overflow_count))
66113+ if (atomic_read_unchecked(&overflow_count))
66114 seq_printf(m, "Overflow: %d entries\n",
66115- atomic_read(&overflow_count));
66116+ atomic_read_unchecked(&overflow_count));
66117
66118 for (i = 0; i < nr_entries; i++) {
66119 entry = entries + i;
66120@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66121 {
66122 struct proc_dir_entry *pe;
66123
66124+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66125+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66126+#else
66127 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66128+#endif
66129 if (!pe)
66130 return -ENOMEM;
66131 return 0;
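The timer_list.c and timer_stats.c hunks apply the GRKERNSEC_HIDESYM pattern seen throughout this patch: wherever a kernel pointer or resolved symbol name would be emitted into a proc file, a constant placeholder is printed instead, and the proc entries themselves are registered with tighter modes (0400 for timer_list, 0600 for the writable timer_stats) under GRKERNSEC_PROC_ADD. The recurring pattern, in its minimal form:

#ifdef CONFIG_GRKERNSEC_HIDESYM
        seq_printf(m, "<%p>", NULL);    /* print a constant, not the address */
#else
        seq_printf(m, "<%p>", addr);    /* stock behaviour */
#endif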
66132diff -urNp linux-2.6.32.46/kernel/time.c linux-2.6.32.46/kernel/time.c
66133--- linux-2.6.32.46/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66134+++ linux-2.6.32.46/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66135@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66136 return error;
66137
66138 if (tz) {
66139+ /* we log in do_settimeofday called below, so don't log twice
66140+ */
66141+ if (!tv)
66142+ gr_log_timechange();
66143+
66144 /* SMP safe, global irq locking makes it work. */
66145 sys_tz = *tz;
66146 update_vsyscall_tz();
66147@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66148 * Avoid unnecessary multiplications/divisions in the
66149 * two most common HZ cases:
66150 */
66151-unsigned int inline jiffies_to_msecs(const unsigned long j)
66152+inline unsigned int jiffies_to_msecs(const unsigned long j)
66153 {
66154 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66155 return (MSEC_PER_SEC / HZ) * j;
66156@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66157 }
66158 EXPORT_SYMBOL(jiffies_to_msecs);
66159
66160-unsigned int inline jiffies_to_usecs(const unsigned long j)
66161+inline unsigned int jiffies_to_usecs(const unsigned long j)
66162 {
66163 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66164 return (USEC_PER_SEC / HZ) * j;
66165diff -urNp linux-2.6.32.46/kernel/timer.c linux-2.6.32.46/kernel/timer.c
66166--- linux-2.6.32.46/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66167+++ linux-2.6.32.46/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66168@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66169 /*
66170 * This function runs timers and the timer-tq in bottom half context.
66171 */
66172-static void run_timer_softirq(struct softirq_action *h)
66173+static void run_timer_softirq(void)
66174 {
66175 struct tvec_base *base = __get_cpu_var(tvec_bases);
66176
66177diff -urNp linux-2.6.32.46/kernel/trace/blktrace.c linux-2.6.32.46/kernel/trace/blktrace.c
66178--- linux-2.6.32.46/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66179+++ linux-2.6.32.46/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66180@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66181 struct blk_trace *bt = filp->private_data;
66182 char buf[16];
66183
66184- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66185+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66186
66187 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66188 }
66189@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66190 return 1;
66191
66192 bt = buf->chan->private_data;
66193- atomic_inc(&bt->dropped);
66194+ atomic_inc_unchecked(&bt->dropped);
66195 return 0;
66196 }
66197
66198@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66199
66200 bt->dir = dir;
66201 bt->dev = dev;
66202- atomic_set(&bt->dropped, 0);
66203+ atomic_set_unchecked(&bt->dropped, 0);
66204
66205 ret = -EIO;
66206 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66207diff -urNp linux-2.6.32.46/kernel/trace/ftrace.c linux-2.6.32.46/kernel/trace/ftrace.c
66208--- linux-2.6.32.46/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66209+++ linux-2.6.32.46/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66210@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66211
66212 ip = rec->ip;
66213
66214+ ret = ftrace_arch_code_modify_prepare();
66215+ FTRACE_WARN_ON(ret);
66216+ if (ret)
66217+ return 0;
66218+
66219 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66220+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66221 if (ret) {
66222 ftrace_bug(ret, ip);
66223 rec->flags |= FTRACE_FL_FAILED;
66224- return 0;
66225 }
66226- return 1;
66227+ return ret ? 0 : 1;
66228 }
66229
66230 /*
66231diff -urNp linux-2.6.32.46/kernel/trace/ring_buffer.c linux-2.6.32.46/kernel/trace/ring_buffer.c
66232--- linux-2.6.32.46/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66233+++ linux-2.6.32.46/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66234@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66235 * the reader page). But if the next page is a header page,
66236 * its flags will be non zero.
66237 */
66238-static int inline
66239+static inline int
66240 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66241 struct buffer_page *page, struct list_head *list)
66242 {
66243diff -urNp linux-2.6.32.46/kernel/trace/trace.c linux-2.6.32.46/kernel/trace/trace.c
66244--- linux-2.6.32.46/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66245+++ linux-2.6.32.46/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66246@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66247 size_t rem;
66248 unsigned int i;
66249
66250+ pax_track_stack();
66251+
66252 /* copy the tracer to avoid using a global lock all around */
66253 mutex_lock(&trace_types_lock);
66254 if (unlikely(old_tracer != current_trace && current_trace)) {
66255@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66256 int entries, size, i;
66257 size_t ret;
66258
66259+ pax_track_stack();
66260+
66261 if (*ppos & (PAGE_SIZE - 1)) {
66262 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66263 return -EINVAL;
66264@@ -3816,10 +3820,9 @@ static const struct file_operations trac
66265 };
66266 #endif
66267
66268-static struct dentry *d_tracer;
66269-
66270 struct dentry *tracing_init_dentry(void)
66271 {
66272+ static struct dentry *d_tracer;
66273 static int once;
66274
66275 if (d_tracer)
66276@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66277 return d_tracer;
66278 }
66279
66280-static struct dentry *d_percpu;
66281-
66282 struct dentry *tracing_dentry_percpu(void)
66283 {
66284+ static struct dentry *d_percpu;
66285 static int once;
66286 struct dentry *d_tracer;
66287
66288diff -urNp linux-2.6.32.46/kernel/trace/trace_events.c linux-2.6.32.46/kernel/trace/trace_events.c
66289--- linux-2.6.32.46/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66290+++ linux-2.6.32.46/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66291@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66292 * Modules must own their file_operations to keep up with
66293 * reference counting.
66294 */
66295+
66296 struct ftrace_module_file_ops {
66297 struct list_head list;
66298 struct module *mod;
66299- struct file_operations id;
66300- struct file_operations enable;
66301- struct file_operations format;
66302- struct file_operations filter;
66303 };
66304
66305 static void remove_subsystem_dir(const char *name)
66306@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66307
66308 file_ops->mod = mod;
66309
66310- file_ops->id = ftrace_event_id_fops;
66311- file_ops->id.owner = mod;
66312-
66313- file_ops->enable = ftrace_enable_fops;
66314- file_ops->enable.owner = mod;
66315-
66316- file_ops->filter = ftrace_event_filter_fops;
66317- file_ops->filter.owner = mod;
66318-
66319- file_ops->format = ftrace_event_format_fops;
66320- file_ops->format.owner = mod;
66321+ pax_open_kernel();
66322+ *(void **)&mod->trace_id.owner = mod;
66323+ *(void **)&mod->trace_enable.owner = mod;
66324+ *(void **)&mod->trace_filter.owner = mod;
66325+ *(void **)&mod->trace_format.owner = mod;
66326+ pax_close_kernel();
66327
66328 list_add(&file_ops->list, &ftrace_module_file_list);
66329
66330@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66331 call->mod = mod;
66332 list_add(&call->list, &ftrace_events);
66333 event_create_dir(call, d_events,
66334- &file_ops->id, &file_ops->enable,
66335- &file_ops->filter, &file_ops->format);
66336+ &mod->trace_id, &mod->trace_enable,
66337+ &mod->trace_filter, &mod->trace_format);
66338 }
66339 }
66340
66341diff -urNp linux-2.6.32.46/kernel/trace/trace_mmiotrace.c linux-2.6.32.46/kernel/trace/trace_mmiotrace.c
66342--- linux-2.6.32.46/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66343+++ linux-2.6.32.46/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66344@@ -23,7 +23,7 @@ struct header_iter {
66345 static struct trace_array *mmio_trace_array;
66346 static bool overrun_detected;
66347 static unsigned long prev_overruns;
66348-static atomic_t dropped_count;
66349+static atomic_unchecked_t dropped_count;
66350
66351 static void mmio_reset_data(struct trace_array *tr)
66352 {
66353@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66354
66355 static unsigned long count_overruns(struct trace_iterator *iter)
66356 {
66357- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66358+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66359 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66360
66361 if (over > prev_overruns)
66362@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66363 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66364 sizeof(*entry), 0, pc);
66365 if (!event) {
66366- atomic_inc(&dropped_count);
66367+ atomic_inc_unchecked(&dropped_count);
66368 return;
66369 }
66370 entry = ring_buffer_event_data(event);
66371@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66372 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66373 sizeof(*entry), 0, pc);
66374 if (!event) {
66375- atomic_inc(&dropped_count);
66376+ atomic_inc_unchecked(&dropped_count);
66377 return;
66378 }
66379 entry = ring_buffer_event_data(event);
66380diff -urNp linux-2.6.32.46/kernel/trace/trace_output.c linux-2.6.32.46/kernel/trace/trace_output.c
66381--- linux-2.6.32.46/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66382+++ linux-2.6.32.46/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66383@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66384 return 0;
66385 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66386 if (!IS_ERR(p)) {
66387- p = mangle_path(s->buffer + s->len, p, "\n");
66388+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66389 if (p) {
66390 s->len = p - s->buffer;
66391 return 1;
66392diff -urNp linux-2.6.32.46/kernel/trace/trace_stack.c linux-2.6.32.46/kernel/trace/trace_stack.c
66393--- linux-2.6.32.46/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66394+++ linux-2.6.32.46/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66395@@ -50,7 +50,7 @@ static inline void check_stack(void)
66396 return;
66397
66398 /* we do not handle interrupt stacks yet */
66399- if (!object_is_on_stack(&this_size))
66400+ if (!object_starts_on_stack(&this_size))
66401 return;
66402
66403 local_irq_save(flags);
66404diff -urNp linux-2.6.32.46/kernel/trace/trace_workqueue.c linux-2.6.32.46/kernel/trace/trace_workqueue.c
66405--- linux-2.6.32.46/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66406+++ linux-2.6.32.46/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66407@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66408 int cpu;
66409 pid_t pid;
66410 /* Can be inserted from interrupt or user context, need to be atomic */
66411- atomic_t inserted;
66412+ atomic_unchecked_t inserted;
66413 /*
66414 * Don't need to be atomic, works are serialized in a single workqueue thread
66415 * on a single CPU.
66416@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66417 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66418 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66419 if (node->pid == wq_thread->pid) {
66420- atomic_inc(&node->inserted);
66421+ atomic_inc_unchecked(&node->inserted);
66422 goto found;
66423 }
66424 }
66425@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66426 tsk = get_pid_task(pid, PIDTYPE_PID);
66427 if (tsk) {
66428 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66429- atomic_read(&cws->inserted), cws->executed,
66430+ atomic_read_unchecked(&cws->inserted), cws->executed,
66431 tsk->comm);
66432 put_task_struct(tsk);
66433 }
66434diff -urNp linux-2.6.32.46/kernel/user.c linux-2.6.32.46/kernel/user.c
66435--- linux-2.6.32.46/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66436+++ linux-2.6.32.46/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66437@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66438 spin_lock_irq(&uidhash_lock);
66439 up = uid_hash_find(uid, hashent);
66440 if (up) {
66441+ put_user_ns(ns);
66442 key_put(new->uid_keyring);
66443 key_put(new->session_keyring);
66444 kmem_cache_free(uid_cachep, new);
66445diff -urNp linux-2.6.32.46/lib/bug.c linux-2.6.32.46/lib/bug.c
66446--- linux-2.6.32.46/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66447+++ linux-2.6.32.46/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66448@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66449 return BUG_TRAP_TYPE_NONE;
66450
66451 bug = find_bug(bugaddr);
66452+ if (!bug)
66453+ return BUG_TRAP_TYPE_NONE;
66454
66455 printk(KERN_EMERG "------------[ cut here ]------------\n");
66456
66457diff -urNp linux-2.6.32.46/lib/debugobjects.c linux-2.6.32.46/lib/debugobjects.c
66458--- linux-2.6.32.46/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66459+++ linux-2.6.32.46/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66460@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66461 if (limit > 4)
66462 return;
66463
66464- is_on_stack = object_is_on_stack(addr);
66465+ is_on_stack = object_starts_on_stack(addr);
66466 if (is_on_stack == onstack)
66467 return;
66468
66469diff -urNp linux-2.6.32.46/lib/dma-debug.c linux-2.6.32.46/lib/dma-debug.c
66470--- linux-2.6.32.46/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66471+++ linux-2.6.32.46/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66472@@ -861,7 +861,7 @@ out:
66473
66474 static void check_for_stack(struct device *dev, void *addr)
66475 {
66476- if (object_is_on_stack(addr))
66477+ if (object_starts_on_stack(addr))
66478 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66479 "stack [addr=%p]\n", addr);
66480 }
66481diff -urNp linux-2.6.32.46/lib/idr.c linux-2.6.32.46/lib/idr.c
66482--- linux-2.6.32.46/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66483+++ linux-2.6.32.46/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66484@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66485 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66486
66487 /* if already at the top layer, we need to grow */
66488- if (id >= 1 << (idp->layers * IDR_BITS)) {
66489+ if (id >= (1 << (idp->layers * IDR_BITS))) {
66490 *starting_id = id;
66491 return IDR_NEED_TO_GROW;
66492 }
66493diff -urNp linux-2.6.32.46/lib/inflate.c linux-2.6.32.46/lib/inflate.c
66494--- linux-2.6.32.46/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66495+++ linux-2.6.32.46/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66496@@ -266,7 +266,7 @@ static void free(void *where)
66497 malloc_ptr = free_mem_ptr;
66498 }
66499 #else
66500-#define malloc(a) kmalloc(a, GFP_KERNEL)
66501+#define malloc(a) kmalloc((a), GFP_KERNEL)
66502 #define free(a) kfree(a)
66503 #endif
66504
66505diff -urNp linux-2.6.32.46/lib/Kconfig.debug linux-2.6.32.46/lib/Kconfig.debug
66506--- linux-2.6.32.46/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66507+++ linux-2.6.32.46/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66508@@ -905,7 +905,7 @@ config LATENCYTOP
66509 select STACKTRACE
66510 select SCHEDSTATS
66511 select SCHED_DEBUG
66512- depends on HAVE_LATENCYTOP_SUPPORT
66513+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66514 help
66515 Enable this option if you want to use the LatencyTOP tool
66516 to find out which userspace is blocking on what kernel operations.
66517diff -urNp linux-2.6.32.46/lib/kobject.c linux-2.6.32.46/lib/kobject.c
66518--- linux-2.6.32.46/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66519+++ linux-2.6.32.46/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66520@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66521 return ret;
66522 }
66523
66524-struct sysfs_ops kobj_sysfs_ops = {
66525+const struct sysfs_ops kobj_sysfs_ops = {
66526 .show = kobj_attr_show,
66527 .store = kobj_attr_store,
66528 };
66529@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66530 * If the kset was not able to be created, NULL will be returned.
66531 */
66532 static struct kset *kset_create(const char *name,
66533- struct kset_uevent_ops *uevent_ops,
66534+ const struct kset_uevent_ops *uevent_ops,
66535 struct kobject *parent_kobj)
66536 {
66537 struct kset *kset;
66538@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66539 * If the kset was not able to be created, NULL will be returned.
66540 */
66541 struct kset *kset_create_and_add(const char *name,
66542- struct kset_uevent_ops *uevent_ops,
66543+ const struct kset_uevent_ops *uevent_ops,
66544 struct kobject *parent_kobj)
66545 {
66546 struct kset *kset;
66547diff -urNp linux-2.6.32.46/lib/kobject_uevent.c linux-2.6.32.46/lib/kobject_uevent.c
66548--- linux-2.6.32.46/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66549+++ linux-2.6.32.46/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66550@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66551 const char *subsystem;
66552 struct kobject *top_kobj;
66553 struct kset *kset;
66554- struct kset_uevent_ops *uevent_ops;
66555+ const struct kset_uevent_ops *uevent_ops;
66556 u64 seq;
66557 int i = 0;
66558 int retval = 0;
66559diff -urNp linux-2.6.32.46/lib/kref.c linux-2.6.32.46/lib/kref.c
66560--- linux-2.6.32.46/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
66561+++ linux-2.6.32.46/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
66562@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
66563 */
66564 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
66565 {
66566- WARN_ON(release == NULL);
66567+ BUG_ON(release == NULL);
66568 WARN_ON(release == (void (*)(struct kref *))kfree);
66569
66570 if (atomic_dec_and_test(&kref->refcount)) {
66571diff -urNp linux-2.6.32.46/lib/parser.c linux-2.6.32.46/lib/parser.c
66572--- linux-2.6.32.46/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
66573+++ linux-2.6.32.46/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
66574@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
66575 char *buf;
66576 int ret;
66577
66578- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
66579+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
66580 if (!buf)
66581 return -ENOMEM;
66582 memcpy(buf, s->from, s->to - s->from);
66583diff -urNp linux-2.6.32.46/lib/radix-tree.c linux-2.6.32.46/lib/radix-tree.c
66584--- linux-2.6.32.46/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
66585+++ linux-2.6.32.46/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
66586@@ -81,7 +81,7 @@ struct radix_tree_preload {
66587 int nr;
66588 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
66589 };
66590-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
66591+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
66592
66593 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
66594 {
66595diff -urNp linux-2.6.32.46/lib/random32.c linux-2.6.32.46/lib/random32.c
66596--- linux-2.6.32.46/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
66597+++ linux-2.6.32.46/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
66598@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
66599 */
66600 static inline u32 __seed(u32 x, u32 m)
66601 {
66602- return (x < m) ? x + m : x;
66603+ return (x <= m) ? x + m + 1 : x;
66604 }
66605
66606 /**
66607diff -urNp linux-2.6.32.46/lib/vsprintf.c linux-2.6.32.46/lib/vsprintf.c
66608--- linux-2.6.32.46/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
66609+++ linux-2.6.32.46/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
66610@@ -16,6 +16,9 @@
66611 * - scnprintf and vscnprintf
66612 */
66613
66614+#ifdef CONFIG_GRKERNSEC_HIDESYM
66615+#define __INCLUDED_BY_HIDESYM 1
66616+#endif
66617 #include <stdarg.h>
66618 #include <linux/module.h>
66619 #include <linux/types.h>
66620@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
66621 return buf;
66622 }
66623
66624-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
66625+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
66626 {
66627 int len, i;
66628
66629 if ((unsigned long)s < PAGE_SIZE)
66630- s = "<NULL>";
66631+ s = "(null)";
66632
66633 len = strnlen(s, spec.precision);
66634
66635@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
66636 unsigned long value = (unsigned long) ptr;
66637 #ifdef CONFIG_KALLSYMS
66638 char sym[KSYM_SYMBOL_LEN];
66639- if (ext != 'f' && ext != 's')
66640+ if (ext != 'f' && ext != 's' && ext != 'a')
66641 sprint_symbol(sym, value);
66642 else
66643 kallsyms_lookup(value, NULL, NULL, NULL, sym);
66644@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
66645 * - 'f' For simple symbolic function names without offset
66646 * - 'S' For symbolic direct pointers with offset
66647 * - 's' For symbolic direct pointers without offset
66648+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
66649+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
66650 * - 'R' For a struct resource pointer, it prints the range of
66651 * addresses (not the name nor the flags)
66652 * - 'M' For a 6-byte MAC address, it prints the address in the
66653@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
66654 struct printf_spec spec)
66655 {
66656 if (!ptr)
66657- return string(buf, end, "(null)", spec);
66658+ return string(buf, end, "(nil)", spec);
66659
66660 switch (*fmt) {
66661 case 'F':
66662@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
66663 case 's':
66664 /* Fallthrough */
66665 case 'S':
66666+#ifdef CONFIG_GRKERNSEC_HIDESYM
66667+ break;
66668+#else
66669+ return symbol_string(buf, end, ptr, spec, *fmt);
66670+#endif
66671+ case 'a':
66672+ /* Fallthrough */
66673+ case 'A':
66674 return symbol_string(buf, end, ptr, spec, *fmt);
66675 case 'R':
66676 return resource_string(buf, end, ptr, spec);
66677@@ -1445,7 +1458,7 @@ do { \
66678 size_t len;
66679 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
66680 || (unsigned long)save_str < PAGE_SIZE)
66681- save_str = "<NULL>";
66682+ save_str = "(null)";
66683 len = strlen(save_str);
66684 if (str + len + 1 < end)
66685 memcpy(str, save_str, len + 1);
66686@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
66687 typeof(type) value; \
66688 if (sizeof(type) == 8) { \
66689 args = PTR_ALIGN(args, sizeof(u32)); \
66690- *(u32 *)&value = *(u32 *)args; \
66691- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
66692+ *(u32 *)&value = *(const u32 *)args; \
66693+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
66694 } else { \
66695 args = PTR_ALIGN(args, sizeof(type)); \
66696- value = *(typeof(type) *)args; \
66697+ value = *(const typeof(type) *)args; \
66698 } \
66699 args += sizeof(type); \
66700 value; \
66701@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
66702 const char *str_arg = args;
66703 size_t len = strlen(str_arg);
66704 args += len + 1;
66705- str = string(str, end, (char *)str_arg, spec);
66706+ str = string(str, end, str_arg, spec);
66707 break;
66708 }
66709
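The vsprintf.c hunk adds %pA and %pa as the only symbolic-pointer format extensions that still resolve symbol names when GRKERNSEC_HIDESYM is enabled; the stock %pS/%ps (and %pF/%pf, which fall through to them in the stock code) stop resolving under HIDESYM, so only call sites explicitly audited and converted to the new specifiers keep printing symbols. A usage sketch under this patch (the surrounding call site is hypothetical):

/* an audited call site opts back in to symbol resolution with %pA */
printk(KERN_ERR "stuck timer, handler: %pA\n", (void *)timer->function);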
66710diff -urNp linux-2.6.32.46/localversion-grsec linux-2.6.32.46/localversion-grsec
66711--- linux-2.6.32.46/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
66712+++ linux-2.6.32.46/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
66713@@ -0,0 +1 @@
66714+-grsec
66715diff -urNp linux-2.6.32.46/Makefile linux-2.6.32.46/Makefile
66716--- linux-2.6.32.46/Makefile 2011-08-29 22:24:44.000000000 -0400
66717+++ linux-2.6.32.46/Makefile 2011-08-29 22:25:07.000000000 -0400
66718@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
66719
66720 HOSTCC = gcc
66721 HOSTCXX = g++
66722-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
66723-HOSTCXXFLAGS = -O2
66724+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
66725+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
66726+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
66727
66728 # Decide whether to build built-in, modular, or both.
66729 # Normally, just do built-in.
66730@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
66731 KBUILD_CPPFLAGS := -D__KERNEL__
66732
66733 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
66734+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
66735 -fno-strict-aliasing -fno-common \
66736 -Werror-implicit-function-declaration \
66737 -Wno-format-security \
66738 -fno-delete-null-pointer-checks
66739+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
66740 KBUILD_AFLAGS := -D__ASSEMBLY__
66741
66742 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
66743@@ -376,9 +379,10 @@ export RCS_TAR_IGNORE := --exclude SCCS
66744 # Rules shared between *config targets and build targets
66745
66746 # Basic helpers built in scripts/
66747-PHONY += scripts_basic
66748-scripts_basic:
66749+PHONY += scripts_basic0 scripts_basic gcc-plugins
66750+scripts_basic0:
66751 $(Q)$(MAKE) $(build)=scripts/basic
66752+scripts_basic: scripts_basic0 gcc-plugins
66753
66754 # To avoid any implicit rule to kick in, define an empty command.
66755 scripts/basic/%: scripts_basic ;
66756@@ -403,7 +407,7 @@ endif
66757 # of make so .config is not included in this case either (for *config).
66758
66759 no-dot-config-targets := clean mrproper distclean \
66760- cscope TAGS tags help %docs check% \
66761+ cscope gtags TAGS tags help %docs check% \
66762 include/linux/version.h headers_% \
66763 kernelrelease kernelversion
66764
66765@@ -526,6 +530,24 @@ else
66766 KBUILD_CFLAGS += -O2
66767 endif
66768
66769+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
66770+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
66771+ifdef CONFIG_PAX_MEMORY_STACKLEAK
66772+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
66773+endif
66774+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
66775+gcc-plugins:
66776+ $(Q)$(MAKE) $(build)=tools/gcc
66777+else
66778+gcc-plugins:
66779+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
66780+	$(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
66781+else
66782+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
66783+endif
66784+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
66785+endif
66786+
66787 include $(srctree)/arch/$(SRCARCH)/Makefile
66788
66789 ifneq ($(CONFIG_FRAME_WARN),0)
66790@@ -644,7 +666,7 @@ export mod_strip_cmd
66791
66792
66793 ifeq ($(KBUILD_EXTMOD),)
66794-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
66795+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
66796
66797 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
66798 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
66799@@ -840,6 +862,7 @@ define rule_vmlinux-modpost
66800 endef
66801
66802 # vmlinux image - including updated kernel symbols
66803+vmlinux: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66804 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
66805 ifdef CONFIG_HEADERS_CHECK
66806 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
66807@@ -970,7 +993,7 @@ ifneq ($(KBUILD_SRC),)
66808 endif
66809
66810 # prepare2 creates a makefile if using a separate output directory
66811-prepare2: prepare3 outputmakefile
66812+prepare2: prepare3 outputmakefile gcc-plugins
66813
66814 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
66815 include/asm include/config/auto.conf
66816@@ -1124,6 +1147,7 @@ all: modules
66817 # using awk while concatenating to the final file.
66818
66819 PHONY += modules
66820+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66821 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
66822 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
66823 @$(kecho) ' Building modules, stage 2.';
66824@@ -1198,7 +1222,7 @@ MRPROPER_FILES += .config .config.old in
66825 include/linux/autoconf.h include/linux/version.h \
66826 include/linux/utsrelease.h \
66827 include/linux/bounds.h include/asm*/asm-offsets.h \
66828- Module.symvers Module.markers tags TAGS cscope*
66829+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
66830
66831 # clean - Delete most, but leave enough to build external modules
66832 #
66833@@ -1214,7 +1238,7 @@ clean: archclean $(clean-dirs)
66834 $(call cmd,rmdirs)
66835 $(call cmd,rmfiles)
66836 @find . $(RCS_FIND_IGNORE) \
66837- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
66838+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
66839 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
66840 -o -name '*.symtypes' -o -name 'modules.order' \
66841 -o -name 'Module.markers' -o -name '.tmp_*.o.*' \
66842@@ -1289,6 +1313,7 @@ help:
66843 @echo ' modules_prepare - Set up for building external modules'
66844 @echo ' tags/TAGS - Generate tags file for editors'
66845 @echo ' cscope - Generate cscope index'
66846+ @echo ' gtags - Generate GNU GLOBAL index'
66847 @echo ' kernelrelease - Output the release version string'
66848 @echo ' kernelversion - Output the version stored in Makefile'
66849 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
66850@@ -1390,6 +1415,7 @@ PHONY += $(module-dirs) modules
66851 $(module-dirs): crmodverdir $(objtree)/Module.symvers
66852 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
66853
66854+modules: KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
66855 modules: $(module-dirs)
66856 @$(kecho) ' Building modules, stage 2.';
66857 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
66858@@ -1445,7 +1471,7 @@ endif # KBUILD_EXTMOD
66859 quiet_cmd_tags = GEN $@
66860 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
66861
66862-tags TAGS cscope: FORCE
66863+tags TAGS cscope gtags: FORCE
66864 $(call cmd,tags)
66865
66866 # Scripts to check various things for consistency
66867diff -urNp linux-2.6.32.46/mm/backing-dev.c linux-2.6.32.46/mm/backing-dev.c
66868--- linux-2.6.32.46/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
66869+++ linux-2.6.32.46/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
66870@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
66871 list_add_tail_rcu(&wb->list, &bdi->wb_list);
66872 spin_unlock(&bdi->wb_lock);
66873
66874- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
66875+ tsk->flags |= PF_SWAPWRITE;
66876 set_freezable();
66877
66878 /*
66879@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
66880 * Add the default flusher task that gets created for any bdi
66881 * that has dirty data pending writeout
66882 */
66883-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66884+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
66885 {
66886 if (!bdi_cap_writeback_dirty(bdi))
66887 return;
66888diff -urNp linux-2.6.32.46/mm/filemap.c linux-2.6.32.46/mm/filemap.c
66889--- linux-2.6.32.46/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
66890+++ linux-2.6.32.46/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
66891@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
66892 struct address_space *mapping = file->f_mapping;
66893
66894 if (!mapping->a_ops->readpage)
66895- return -ENOEXEC;
66896+ return -ENODEV;
66897 file_accessed(file);
66898 vma->vm_ops = &generic_file_vm_ops;
66899 vma->vm_flags |= VM_CAN_NONLINEAR;
66900@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
66901 *pos = i_size_read(inode);
66902
66903 if (limit != RLIM_INFINITY) {
66904+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
66905 if (*pos >= limit) {
66906 send_sig(SIGXFSZ, current, 0);
66907 return -EFBIG;
66908diff -urNp linux-2.6.32.46/mm/fremap.c linux-2.6.32.46/mm/fremap.c
66909--- linux-2.6.32.46/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
66910+++ linux-2.6.32.46/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
66911@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66912 retry:
66913 vma = find_vma(mm, start);
66914
66915+#ifdef CONFIG_PAX_SEGMEXEC
66916+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
66917+ goto out;
66918+#endif
66919+
66920 /*
66921 * Make sure the vma is shared, that it supports prefaulting,
66922 * and that the remapped range is valid and fully within
66923@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
66924 /*
66925 * drop PG_Mlocked flag for over-mapped range
66926 */
66927- unsigned int saved_flags = vma->vm_flags;
66928+ unsigned long saved_flags = vma->vm_flags;
66929 munlock_vma_pages_range(vma, start, start + size);
66930 vma->vm_flags = saved_flags;
66931 }
66932diff -urNp linux-2.6.32.46/mm/highmem.c linux-2.6.32.46/mm/highmem.c
66933--- linux-2.6.32.46/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
66934+++ linux-2.6.32.46/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
66935@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
66936 * So no dangers, even with speculative execution.
66937 */
66938 page = pte_page(pkmap_page_table[i]);
66939+ pax_open_kernel();
66940 pte_clear(&init_mm, (unsigned long)page_address(page),
66941 &pkmap_page_table[i]);
66942-
66943+ pax_close_kernel();
66944 set_page_address(page, NULL);
66945 need_flush = 1;
66946 }
66947@@ -177,9 +178,11 @@ start:
66948 }
66949 }
66950 vaddr = PKMAP_ADDR(last_pkmap_nr);
66951+
66952+ pax_open_kernel();
66953 set_pte_at(&init_mm, vaddr,
66954 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
66955-
66956+ pax_close_kernel();
66957 pkmap_count[last_pkmap_nr] = 1;
66958 set_page_address(page, (void *)vaddr);
66959
66960diff -urNp linux-2.6.32.46/mm/hugetlb.c linux-2.6.32.46/mm/hugetlb.c
66961--- linux-2.6.32.46/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
66962+++ linux-2.6.32.46/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
66963@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
66964 return 1;
66965 }
66966
66967+#ifdef CONFIG_PAX_SEGMEXEC
66968+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
66969+{
66970+ struct mm_struct *mm = vma->vm_mm;
66971+ struct vm_area_struct *vma_m;
66972+ unsigned long address_m;
66973+ pte_t *ptep_m;
66974+
66975+ vma_m = pax_find_mirror_vma(vma);
66976+ if (!vma_m)
66977+ return;
66978+
66979+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
66980+ address_m = address + SEGMEXEC_TASK_SIZE;
66981+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
66982+ get_page(page_m);
66983+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
66984+}
66985+#endif
66986+
66987 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
66988 unsigned long address, pte_t *ptep, pte_t pte,
66989 struct page *pagecache_page)
66990@@ -2004,6 +2024,11 @@ retry_avoidcopy:
66991 huge_ptep_clear_flush(vma, address, ptep);
66992 set_huge_pte_at(mm, address, ptep,
66993 make_huge_pte(vma, new_page, 1));
66994+
66995+#ifdef CONFIG_PAX_SEGMEXEC
66996+ pax_mirror_huge_pte(vma, address, new_page);
66997+#endif
66998+
66999 /* Make the old page be freed below */
67000 new_page = old_page;
67001 }
67002@@ -2135,6 +2160,10 @@ retry:
67003 && (vma->vm_flags & VM_SHARED)));
67004 set_huge_pte_at(mm, address, ptep, new_pte);
67005
67006+#ifdef CONFIG_PAX_SEGMEXEC
67007+ pax_mirror_huge_pte(vma, address, page);
67008+#endif
67009+
67010 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67011 /* Optimization, do the COW without a second fault */
67012 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67013@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67014 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67015 struct hstate *h = hstate_vma(vma);
67016
67017+#ifdef CONFIG_PAX_SEGMEXEC
67018+ struct vm_area_struct *vma_m;
67019+
67020+ vma_m = pax_find_mirror_vma(vma);
67021+ if (vma_m) {
67022+ unsigned long address_m;
67023+
67024+ if (vma->vm_start > vma_m->vm_start) {
67025+ address_m = address;
67026+ address -= SEGMEXEC_TASK_SIZE;
67027+ vma = vma_m;
67028+ h = hstate_vma(vma);
67029+ } else
67030+ address_m = address + SEGMEXEC_TASK_SIZE;
67031+
67032+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67033+ return VM_FAULT_OOM;
67034+ address_m &= HPAGE_MASK;
67035+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67036+ }
67037+#endif
67038+
67039 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67040 if (!ptep)
67041 return VM_FAULT_OOM;
67042diff -urNp linux-2.6.32.46/mm/internal.h linux-2.6.32.46/mm/internal.h
67043--- linux-2.6.32.46/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67044+++ linux-2.6.32.46/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67045@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67046 * in mm/page_alloc.c
67047 */
67048 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67049+extern void free_compound_page(struct page *page);
67050 extern void prep_compound_page(struct page *page, unsigned long order);
67051
67052
67053diff -urNp linux-2.6.32.46/mm/Kconfig linux-2.6.32.46/mm/Kconfig
67054--- linux-2.6.32.46/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67055+++ linux-2.6.32.46/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67056@@ -228,7 +228,7 @@ config KSM
67057 config DEFAULT_MMAP_MIN_ADDR
67058 int "Low address space to protect from user allocation"
67059 depends on MMU
67060- default 4096
67061+ default 65536
67062 help
67063 This is the portion of low virtual memory which should be protected
67064 from userspace allocation. Keeping a user from writing to low pages
67065diff -urNp linux-2.6.32.46/mm/kmemleak.c linux-2.6.32.46/mm/kmemleak.c
67066--- linux-2.6.32.46/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67067+++ linux-2.6.32.46/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67068@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67069
67070 for (i = 0; i < object->trace_len; i++) {
67071 void *ptr = (void *)object->trace[i];
67072- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67073+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67074 }
67075 }
67076
67077diff -urNp linux-2.6.32.46/mm/maccess.c linux-2.6.32.46/mm/maccess.c
67078--- linux-2.6.32.46/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67079+++ linux-2.6.32.46/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67080@@ -14,7 +14,7 @@
67081 * Safely read from address @src to the buffer at @dst. If a kernel fault
67082 * happens, handle that and return -EFAULT.
67083 */
67084-long probe_kernel_read(void *dst, void *src, size_t size)
67085+long probe_kernel_read(void *dst, const void *src, size_t size)
67086 {
67087 long ret;
67088 mm_segment_t old_fs = get_fs();
67089@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67090 * Safely write to address @dst from the buffer at @src. If a kernel fault
67091 * happens, handle that and return -EFAULT.
67092 */
67093-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67094+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67095 {
67096 long ret;
67097 mm_segment_t old_fs = get_fs();
67098diff -urNp linux-2.6.32.46/mm/madvise.c linux-2.6.32.46/mm/madvise.c
67099--- linux-2.6.32.46/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67100+++ linux-2.6.32.46/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67101@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67102 pgoff_t pgoff;
67103 unsigned long new_flags = vma->vm_flags;
67104
67105+#ifdef CONFIG_PAX_SEGMEXEC
67106+ struct vm_area_struct *vma_m;
67107+#endif
67108+
67109 switch (behavior) {
67110 case MADV_NORMAL:
67111 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67112@@ -103,6 +107,13 @@ success:
67113 /*
67114 * vm_flags is protected by the mmap_sem held in write mode.
67115 */
67116+
67117+#ifdef CONFIG_PAX_SEGMEXEC
67118+ vma_m = pax_find_mirror_vma(vma);
67119+ if (vma_m)
67120+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67121+#endif
67122+
67123 vma->vm_flags = new_flags;
67124
67125 out:
67126@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67127 struct vm_area_struct ** prev,
67128 unsigned long start, unsigned long end)
67129 {
67130+
67131+#ifdef CONFIG_PAX_SEGMEXEC
67132+ struct vm_area_struct *vma_m;
67133+#endif
67134+
67135 *prev = vma;
67136 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67137 return -EINVAL;
67138@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67139 zap_page_range(vma, start, end - start, &details);
67140 } else
67141 zap_page_range(vma, start, end - start, NULL);
67142+
67143+#ifdef CONFIG_PAX_SEGMEXEC
67144+ vma_m = pax_find_mirror_vma(vma);
67145+ if (vma_m) {
67146+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67147+ struct zap_details details = {
67148+ .nonlinear_vma = vma_m,
67149+ .last_index = ULONG_MAX,
67150+ };
67151+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67152+ } else
67153+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67154+ }
67155+#endif
67156+
67157 return 0;
67158 }
67159
67160@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67161 if (end < start)
67162 goto out;
67163
67164+#ifdef CONFIG_PAX_SEGMEXEC
67165+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67166+ if (end > SEGMEXEC_TASK_SIZE)
67167+ goto out;
67168+ } else
67169+#endif
67170+
67171+ if (end > TASK_SIZE)
67172+ goto out;
67173+
67174 error = 0;
67175 if (end == start)
67176 goto out;
67177diff -urNp linux-2.6.32.46/mm/memory.c linux-2.6.32.46/mm/memory.c
67178--- linux-2.6.32.46/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67179+++ linux-2.6.32.46/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67180@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67181 return;
67182
67183 pmd = pmd_offset(pud, start);
67184+
67185+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67186 pud_clear(pud);
67187 pmd_free_tlb(tlb, pmd, start);
67188+#endif
67189+
67190 }
67191
67192 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67193@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67194 if (end - 1 > ceiling - 1)
67195 return;
67196
67197+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67198 pud = pud_offset(pgd, start);
67199 pgd_clear(pgd);
67200 pud_free_tlb(tlb, pud, start);
67201+#endif
67202+
67203 }
67204
67205 /*
67206@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67207 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67208 i = 0;
67209
67210- do {
67211+ while (nr_pages) {
67212 struct vm_area_struct *vma;
67213
67214- vma = find_extend_vma(mm, start);
67215+ vma = find_vma(mm, start);
67216 if (!vma && in_gate_area(tsk, start)) {
67217 unsigned long pg = start & PAGE_MASK;
67218 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67219@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67220 continue;
67221 }
67222
67223- if (!vma ||
67224+ if (!vma || start < vma->vm_start ||
67225 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67226 !(vm_flags & vma->vm_flags))
67227 return i ? : -EFAULT;
67228@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67229 start += PAGE_SIZE;
67230 nr_pages--;
67231 } while (nr_pages && start < vma->vm_end);
67232- } while (nr_pages);
67233+ }
67234 return i;
67235 }
67236
67237@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67238 page_add_file_rmap(page);
67239 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67240
67241+#ifdef CONFIG_PAX_SEGMEXEC
67242+ pax_mirror_file_pte(vma, addr, page, ptl);
67243+#endif
67244+
67245 retval = 0;
67246 pte_unmap_unlock(pte, ptl);
67247 return retval;
67248@@ -1560,10 +1571,22 @@ out:
67249 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67250 struct page *page)
67251 {
67252+
67253+#ifdef CONFIG_PAX_SEGMEXEC
67254+ struct vm_area_struct *vma_m;
67255+#endif
67256+
67257 if (addr < vma->vm_start || addr >= vma->vm_end)
67258 return -EFAULT;
67259 if (!page_count(page))
67260 return -EINVAL;
67261+
67262+#ifdef CONFIG_PAX_SEGMEXEC
67263+ vma_m = pax_find_mirror_vma(vma);
67264+ if (vma_m)
67265+ vma_m->vm_flags |= VM_INSERTPAGE;
67266+#endif
67267+
67268 vma->vm_flags |= VM_INSERTPAGE;
67269 return insert_page(vma, addr, page, vma->vm_page_prot);
67270 }
67271@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67272 unsigned long pfn)
67273 {
67274 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67275+ BUG_ON(vma->vm_mirror);
67276
67277 if (addr < vma->vm_start || addr >= vma->vm_end)
67278 return -EFAULT;
67279@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67280 copy_user_highpage(dst, src, va, vma);
67281 }
67282
67283+#ifdef CONFIG_PAX_SEGMEXEC
67284+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67285+{
67286+ struct mm_struct *mm = vma->vm_mm;
67287+ spinlock_t *ptl;
67288+ pte_t *pte, entry;
67289+
67290+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67291+ entry = *pte;
67292+ if (!pte_present(entry)) {
67293+ if (!pte_none(entry)) {
67294+ BUG_ON(pte_file(entry));
67295+ free_swap_and_cache(pte_to_swp_entry(entry));
67296+ pte_clear_not_present_full(mm, address, pte, 0);
67297+ }
67298+ } else {
67299+ struct page *page;
67300+
67301+ flush_cache_page(vma, address, pte_pfn(entry));
67302+ entry = ptep_clear_flush(vma, address, pte);
67303+ BUG_ON(pte_dirty(entry));
67304+ page = vm_normal_page(vma, address, entry);
67305+ if (page) {
67306+ update_hiwater_rss(mm);
67307+ if (PageAnon(page))
67308+ dec_mm_counter(mm, anon_rss);
67309+ else
67310+ dec_mm_counter(mm, file_rss);
67311+ page_remove_rmap(page);
67312+ page_cache_release(page);
67313+ }
67314+ }
67315+ pte_unmap_unlock(pte, ptl);
67316+}
67317+
67318+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67319+ *
67320+ * the ptl of the lower mapped page is held on entry and is not released on exit
67321+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67322+ */
67323+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67324+{
67325+ struct mm_struct *mm = vma->vm_mm;
67326+ unsigned long address_m;
67327+ spinlock_t *ptl_m;
67328+ struct vm_area_struct *vma_m;
67329+ pmd_t *pmd_m;
67330+ pte_t *pte_m, entry_m;
67331+
67332+ BUG_ON(!page_m || !PageAnon(page_m));
67333+
67334+ vma_m = pax_find_mirror_vma(vma);
67335+ if (!vma_m)
67336+ return;
67337+
67338+ BUG_ON(!PageLocked(page_m));
67339+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67340+ address_m = address + SEGMEXEC_TASK_SIZE;
67341+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67342+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67343+ ptl_m = pte_lockptr(mm, pmd_m);
67344+ if (ptl != ptl_m) {
67345+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67346+ if (!pte_none(*pte_m))
67347+ goto out;
67348+ }
67349+
67350+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67351+ page_cache_get(page_m);
67352+ page_add_anon_rmap(page_m, vma_m, address_m);
67353+ inc_mm_counter(mm, anon_rss);
67354+ set_pte_at(mm, address_m, pte_m, entry_m);
67355+ update_mmu_cache(vma_m, address_m, entry_m);
67356+out:
67357+ if (ptl != ptl_m)
67358+ spin_unlock(ptl_m);
67359+ pte_unmap_nested(pte_m);
67360+ unlock_page(page_m);
67361+}
67362+
67363+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67364+{
67365+ struct mm_struct *mm = vma->vm_mm;
67366+ unsigned long address_m;
67367+ spinlock_t *ptl_m;
67368+ struct vm_area_struct *vma_m;
67369+ pmd_t *pmd_m;
67370+ pte_t *pte_m, entry_m;
67371+
67372+ BUG_ON(!page_m || PageAnon(page_m));
67373+
67374+ vma_m = pax_find_mirror_vma(vma);
67375+ if (!vma_m)
67376+ return;
67377+
67378+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67379+ address_m = address + SEGMEXEC_TASK_SIZE;
67380+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67381+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67382+ ptl_m = pte_lockptr(mm, pmd_m);
67383+ if (ptl != ptl_m) {
67384+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67385+ if (!pte_none(*pte_m))
67386+ goto out;
67387+ }
67388+
67389+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67390+ page_cache_get(page_m);
67391+ page_add_file_rmap(page_m);
67392+ inc_mm_counter(mm, file_rss);
67393+ set_pte_at(mm, address_m, pte_m, entry_m);
67394+ update_mmu_cache(vma_m, address_m, entry_m);
67395+out:
67396+ if (ptl != ptl_m)
67397+ spin_unlock(ptl_m);
67398+ pte_unmap_nested(pte_m);
67399+}
67400+
67401+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67402+{
67403+ struct mm_struct *mm = vma->vm_mm;
67404+ unsigned long address_m;
67405+ spinlock_t *ptl_m;
67406+ struct vm_area_struct *vma_m;
67407+ pmd_t *pmd_m;
67408+ pte_t *pte_m, entry_m;
67409+
67410+ vma_m = pax_find_mirror_vma(vma);
67411+ if (!vma_m)
67412+ return;
67413+
67414+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67415+ address_m = address + SEGMEXEC_TASK_SIZE;
67416+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67417+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67418+ ptl_m = pte_lockptr(mm, pmd_m);
67419+ if (ptl != ptl_m) {
67420+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67421+ if (!pte_none(*pte_m))
67422+ goto out;
67423+ }
67424+
67425+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67426+ set_pte_at(mm, address_m, pte_m, entry_m);
67427+out:
67428+ if (ptl != ptl_m)
67429+ spin_unlock(ptl_m);
67430+ pte_unmap_nested(pte_m);
67431+}
67432+
67433+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67434+{
67435+ struct page *page_m;
67436+ pte_t entry;
67437+
67438+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67439+ goto out;
67440+
67441+ entry = *pte;
67442+ page_m = vm_normal_page(vma, address, entry);
67443+ if (!page_m)
67444+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67445+ else if (PageAnon(page_m)) {
67446+ if (pax_find_mirror_vma(vma)) {
67447+ pte_unmap_unlock(pte, ptl);
67448+ lock_page(page_m);
67449+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67450+ if (pte_same(entry, *pte))
67451+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67452+ else
67453+ unlock_page(page_m);
67454+ }
67455+ } else
67456+ pax_mirror_file_pte(vma, address, page_m, ptl);
67457+
67458+out:
67459+ pte_unmap_unlock(pte, ptl);
67460+}
67461+#endif
67462+
67463 /*
67464 * This routine handles present pages, when users try to write
67465 * to a shared page. It is done by copying the page to a new address
67466@@ -2156,6 +2360,12 @@ gotten:
67467 */
67468 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67469 if (likely(pte_same(*page_table, orig_pte))) {
67470+
67471+#ifdef CONFIG_PAX_SEGMEXEC
67472+ if (pax_find_mirror_vma(vma))
67473+ BUG_ON(!trylock_page(new_page));
67474+#endif
67475+
67476 if (old_page) {
67477 if (!PageAnon(old_page)) {
67478 dec_mm_counter(mm, file_rss);
67479@@ -2207,6 +2417,10 @@ gotten:
67480 page_remove_rmap(old_page);
67481 }
67482
67483+#ifdef CONFIG_PAX_SEGMEXEC
67484+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67485+#endif
67486+
67487 /* Free the old page.. */
67488 new_page = old_page;
67489 ret |= VM_FAULT_WRITE;
67490@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67491 swap_free(entry);
67492 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67493 try_to_free_swap(page);
67494+
67495+#ifdef CONFIG_PAX_SEGMEXEC
67496+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67497+#endif
67498+
67499 unlock_page(page);
67500
67501 if (flags & FAULT_FLAG_WRITE) {
67502@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67503
67504 /* No need to invalidate - it was non-present before */
67505 update_mmu_cache(vma, address, pte);
67506+
67507+#ifdef CONFIG_PAX_SEGMEXEC
67508+ pax_mirror_anon_pte(vma, address, page, ptl);
67509+#endif
67510+
67511 unlock:
67512 pte_unmap_unlock(page_table, ptl);
67513 out:
67514@@ -2632,40 +2856,6 @@ out_release:
67515 }
67516
67517 /*
67518- * This is like a special single-page "expand_{down|up}wards()",
67519- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67520- * doesn't hit another vma.
67521- */
67522-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67523-{
67524- address &= PAGE_MASK;
67525- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67526- struct vm_area_struct *prev = vma->vm_prev;
67527-
67528- /*
67529- * Is there a mapping abutting this one below?
67530- *
67531- * That's only ok if it's the same stack mapping
67532- * that has gotten split..
67533- */
67534- if (prev && prev->vm_end == address)
67535- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67536-
67537- expand_stack(vma, address - PAGE_SIZE);
67538- }
67539- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67540- struct vm_area_struct *next = vma->vm_next;
67541-
67542- /* As VM_GROWSDOWN but s/below/above/ */
67543- if (next && next->vm_start == address + PAGE_SIZE)
67544- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67545-
67546- expand_upwards(vma, address + PAGE_SIZE);
67547- }
67548- return 0;
67549-}
67550-
67551-/*
67552 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67553 * but allow concurrent faults), and pte mapped but not yet locked.
67554 * We return with mmap_sem still held, but pte unmapped and unlocked.
67555@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67556 unsigned long address, pte_t *page_table, pmd_t *pmd,
67557 unsigned int flags)
67558 {
67559- struct page *page;
67560+ struct page *page = NULL;
67561 spinlock_t *ptl;
67562 pte_t entry;
67563
67564- pte_unmap(page_table);
67565-
67566- /* Check if we need to add a guard page to the stack */
67567- if (check_stack_guard_page(vma, address) < 0)
67568- return VM_FAULT_SIGBUS;
67569-
67570- /* Use the zero-page for reads */
67571 if (!(flags & FAULT_FLAG_WRITE)) {
67572 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67573 vma->vm_page_prot));
67574- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67575+ ptl = pte_lockptr(mm, pmd);
67576+ spin_lock(ptl);
67577 if (!pte_none(*page_table))
67578 goto unlock;
67579 goto setpte;
67580 }
67581
67582 /* Allocate our own private page. */
67583+ pte_unmap(page_table);
67584+
67585 if (unlikely(anon_vma_prepare(vma)))
67586 goto oom;
67587 page = alloc_zeroed_user_highpage_movable(vma, address);
67588@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
67589 if (!pte_none(*page_table))
67590 goto release;
67591
67592+#ifdef CONFIG_PAX_SEGMEXEC
67593+ if (pax_find_mirror_vma(vma))
67594+ BUG_ON(!trylock_page(page));
67595+#endif
67596+
67597 inc_mm_counter(mm, anon_rss);
67598 page_add_new_anon_rmap(page, vma, address);
67599 setpte:
67600@@ -2720,6 +2911,12 @@ setpte:
67601
67602 /* No need to invalidate - it was non-present before */
67603 update_mmu_cache(vma, address, entry);
67604+
67605+#ifdef CONFIG_PAX_SEGMEXEC
67606+ if (page)
67607+ pax_mirror_anon_pte(vma, address, page, ptl);
67608+#endif
67609+
67610 unlock:
67611 pte_unmap_unlock(page_table, ptl);
67612 return 0;
67613@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
67614 */
67615 /* Only go through if we didn't race with anybody else... */
67616 if (likely(pte_same(*page_table, orig_pte))) {
67617+
67618+#ifdef CONFIG_PAX_SEGMEXEC
67619+ if (anon && pax_find_mirror_vma(vma))
67620+ BUG_ON(!trylock_page(page));
67621+#endif
67622+
67623 flush_icache_page(vma, page);
67624 entry = mk_pte(page, vma->vm_page_prot);
67625 if (flags & FAULT_FLAG_WRITE)
67626@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
67627
67628 /* no need to invalidate: a not-present page won't be cached */
67629 update_mmu_cache(vma, address, entry);
67630+
67631+#ifdef CONFIG_PAX_SEGMEXEC
67632+ if (anon)
67633+ pax_mirror_anon_pte(vma, address, page, ptl);
67634+ else
67635+ pax_mirror_file_pte(vma, address, page, ptl);
67636+#endif
67637+
67638 } else {
67639 if (charged)
67640 mem_cgroup_uncharge_page(page);
67641@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
67642 if (flags & FAULT_FLAG_WRITE)
67643 flush_tlb_page(vma, address);
67644 }
67645+
67646+#ifdef CONFIG_PAX_SEGMEXEC
67647+ pax_mirror_pte(vma, address, pte, pmd, ptl);
67648+ return 0;
67649+#endif
67650+
67651 unlock:
67652 pte_unmap_unlock(pte, ptl);
67653 return 0;
67654@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
67655 pmd_t *pmd;
67656 pte_t *pte;
67657
67658+#ifdef CONFIG_PAX_SEGMEXEC
67659+ struct vm_area_struct *vma_m;
67660+#endif
67661+
67662 __set_current_state(TASK_RUNNING);
67663
67664 count_vm_event(PGFAULT);
67665@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
67666 if (unlikely(is_vm_hugetlb_page(vma)))
67667 return hugetlb_fault(mm, vma, address, flags);
67668
67669+#ifdef CONFIG_PAX_SEGMEXEC
67670+ vma_m = pax_find_mirror_vma(vma);
67671+ if (vma_m) {
67672+ unsigned long address_m;
67673+ pgd_t *pgd_m;
67674+ pud_t *pud_m;
67675+ pmd_t *pmd_m;
67676+
67677+ if (vma->vm_start > vma_m->vm_start) {
67678+ address_m = address;
67679+ address -= SEGMEXEC_TASK_SIZE;
67680+ vma = vma_m;
67681+ } else
67682+ address_m = address + SEGMEXEC_TASK_SIZE;
67683+
67684+ pgd_m = pgd_offset(mm, address_m);
67685+ pud_m = pud_alloc(mm, pgd_m, address_m);
67686+ if (!pud_m)
67687+ return VM_FAULT_OOM;
67688+ pmd_m = pmd_alloc(mm, pud_m, address_m);
67689+ if (!pmd_m)
67690+ return VM_FAULT_OOM;
67691+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
67692+ return VM_FAULT_OOM;
67693+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
67694+ }
67695+#endif
67696+
67697 pgd = pgd_offset(mm, address);
67698 pud = pud_alloc(mm, pgd, address);
67699 if (!pud)
67700@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
67701 gate_vma.vm_start = FIXADDR_USER_START;
67702 gate_vma.vm_end = FIXADDR_USER_END;
67703 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
67704- gate_vma.vm_page_prot = __P101;
67705+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
67706 /*
67707 * Make sure the vDSO gets into every core dump.
67708 * Dumping its contents makes post-mortem fully interpretable later
67709diff -urNp linux-2.6.32.46/mm/memory-failure.c linux-2.6.32.46/mm/memory-failure.c
67710--- linux-2.6.32.46/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
67711+++ linux-2.6.32.46/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
67712@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
67713
67714 int sysctl_memory_failure_recovery __read_mostly = 1;
67715
67716-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67717+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67718
67719 /*
67720 * Send all the processes who have the page mapped an ``action optional''
67721@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
67722 return 0;
67723 }
67724
67725- atomic_long_add(1, &mce_bad_pages);
67726+ atomic_long_add_unchecked(1, &mce_bad_pages);
67727
67728 /*
67729 * We need/can do nothing about count=0 pages.
67730diff -urNp linux-2.6.32.46/mm/mempolicy.c linux-2.6.32.46/mm/mempolicy.c
67731--- linux-2.6.32.46/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
67732+++ linux-2.6.32.46/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
67733@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
67734 struct vm_area_struct *next;
67735 int err;
67736
67737+#ifdef CONFIG_PAX_SEGMEXEC
67738+ struct vm_area_struct *vma_m;
67739+#endif
67740+
67741 err = 0;
67742 for (; vma && vma->vm_start < end; vma = next) {
67743 next = vma->vm_next;
67744@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
67745 err = policy_vma(vma, new);
67746 if (err)
67747 break;
67748+
67749+#ifdef CONFIG_PAX_SEGMEXEC
67750+ vma_m = pax_find_mirror_vma(vma);
67751+ if (vma_m) {
67752+ err = policy_vma(vma_m, new);
67753+ if (err)
67754+ break;
67755+ }
67756+#endif
67757+
67758 }
67759 return err;
67760 }
67761@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
67762
67763 if (end < start)
67764 return -EINVAL;
67765+
67766+#ifdef CONFIG_PAX_SEGMEXEC
67767+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
67768+ if (end > SEGMEXEC_TASK_SIZE)
67769+ return -EINVAL;
67770+ } else
67771+#endif
67772+
67773+ if (end > TASK_SIZE)
67774+ return -EINVAL;
67775+
67776 if (end == start)
67777 return 0;
67778
67779@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67780 if (!mm)
67781 return -EINVAL;
67782
67783+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67784+ if (mm != current->mm &&
67785+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67786+ err = -EPERM;
67787+ goto out;
67788+ }
67789+#endif
67790+
67791 /*
67792 * Check if this process has the right to modify the specified
67793 * process. The right exists if the process has administrative
67794@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
67795 rcu_read_lock();
67796 tcred = __task_cred(task);
67797 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67798- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67799- !capable(CAP_SYS_NICE)) {
67800+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67801 rcu_read_unlock();
67802 err = -EPERM;
67803 goto out;
67804@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
67805
67806 if (file) {
67807 seq_printf(m, " file=");
67808- seq_path(m, &file->f_path, "\n\t= ");
67809+ seq_path(m, &file->f_path, "\n\t\\= ");
67810 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67811 seq_printf(m, " heap");
67812 } else if (vma->vm_start <= mm->start_stack &&
67813diff -urNp linux-2.6.32.46/mm/migrate.c linux-2.6.32.46/mm/migrate.c
67814--- linux-2.6.32.46/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
67815+++ linux-2.6.32.46/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
67816@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
67817 unsigned long chunk_start;
67818 int err;
67819
67820+ pax_track_stack();
67821+
67822 task_nodes = cpuset_mems_allowed(task);
67823
67824 err = -ENOMEM;
67825@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67826 if (!mm)
67827 return -EINVAL;
67828
67829+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67830+ if (mm != current->mm &&
67831+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
67832+ err = -EPERM;
67833+ goto out;
67834+ }
67835+#endif
67836+
67837 /*
67838 * Check if this process has the right to modify the specified
67839 * process. The right exists if the process has administrative
67840@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
67841 rcu_read_lock();
67842 tcred = __task_cred(task);
67843 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
67844- cred->uid != tcred->suid && cred->uid != tcred->uid &&
67845- !capable(CAP_SYS_NICE)) {
67846+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
67847 rcu_read_unlock();
67848 err = -EPERM;
67849 goto out;
67850diff -urNp linux-2.6.32.46/mm/mlock.c linux-2.6.32.46/mm/mlock.c
67851--- linux-2.6.32.46/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
67852+++ linux-2.6.32.46/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
67853@@ -13,6 +13,7 @@
67854 #include <linux/pagemap.h>
67855 #include <linux/mempolicy.h>
67856 #include <linux/syscalls.h>
67857+#include <linux/security.h>
67858 #include <linux/sched.h>
67859 #include <linux/module.h>
67860 #include <linux/rmap.h>
67861@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
67862 }
67863 }
67864
67865-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67866-{
67867- return (vma->vm_flags & VM_GROWSDOWN) &&
67868- (vma->vm_start == addr) &&
67869- !vma_stack_continue(vma->vm_prev, addr);
67870-}
67871-
67872 /**
67873 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
67874 * @vma: target vma
67875@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
67876 if (vma->vm_flags & VM_WRITE)
67877 gup_flags |= FOLL_WRITE;
67878
67879- /* We don't try to access the guard page of a stack vma */
67880- if (stack_guard_page(vma, start)) {
67881- addr += PAGE_SIZE;
67882- nr_pages--;
67883- }
67884-
67885 while (nr_pages > 0) {
67886 int i;
67887
67888@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
67889 {
67890 unsigned long nstart, end, tmp;
67891 struct vm_area_struct * vma, * prev;
67892- int error;
67893+ int error = -EINVAL;
67894
67895 len = PAGE_ALIGN(len);
67896 end = start + len;
67897@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
67898 return -EINVAL;
67899 if (end == start)
67900 return 0;
67901+ if (end > TASK_SIZE)
67902+ return -EINVAL;
67903+
67904 vma = find_vma_prev(current->mm, start, &prev);
67905 if (!vma || vma->vm_start > start)
67906 return -ENOMEM;
67907@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
67908 for (nstart = start ; ; ) {
67909 unsigned int newflags;
67910
67911+#ifdef CONFIG_PAX_SEGMEXEC
67912+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67913+ break;
67914+#endif
67915+
67916 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
67917
67918 newflags = vma->vm_flags | VM_LOCKED;
67919@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
67920 lock_limit >>= PAGE_SHIFT;
67921
67922 /* check against resource limits */
67923+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
67924 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
67925 error = do_mlock(start, len, 1);
67926 up_write(&current->mm->mmap_sem);
67927@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
67928 static int do_mlockall(int flags)
67929 {
67930 struct vm_area_struct * vma, * prev = NULL;
67931- unsigned int def_flags = 0;
67932
67933 if (flags & MCL_FUTURE)
67934- def_flags = VM_LOCKED;
67935- current->mm->def_flags = def_flags;
67936+ current->mm->def_flags |= VM_LOCKED;
67937+ else
67938+ current->mm->def_flags &= ~VM_LOCKED;
67939 if (flags == MCL_FUTURE)
67940 goto out;
67941
67942 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
67943- unsigned int newflags;
67944+ unsigned long newflags;
67945+
67946+#ifdef CONFIG_PAX_SEGMEXEC
67947+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
67948+ break;
67949+#endif
67950
67951+ BUG_ON(vma->vm_end > TASK_SIZE);
67952 newflags = vma->vm_flags | VM_LOCKED;
67953 if (!(flags & MCL_CURRENT))
67954 newflags &= ~VM_LOCKED;
67955@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
67956 lock_limit >>= PAGE_SHIFT;
67957
67958 ret = -ENOMEM;
67959+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
67960 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
67961 capable(CAP_IPC_LOCK))
67962 ret = do_mlockall(flags);
67963diff -urNp linux-2.6.32.46/mm/mmap.c linux-2.6.32.46/mm/mmap.c
67964--- linux-2.6.32.46/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
67965+++ linux-2.6.32.46/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
67966@@ -45,6 +45,16 @@
67967 #define arch_rebalance_pgtables(addr, len) (addr)
67968 #endif
67969
67970+static inline void verify_mm_writelocked(struct mm_struct *mm)
67971+{
67972+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
67973+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
67974+ up_read(&mm->mmap_sem);
67975+ BUG();
67976+ }
67977+#endif
67978+}
67979+
67980 static void unmap_region(struct mm_struct *mm,
67981 struct vm_area_struct *vma, struct vm_area_struct *prev,
67982 unsigned long start, unsigned long end);
67983@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
67984 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
67985 *
67986 */
67987-pgprot_t protection_map[16] = {
67988+pgprot_t protection_map[16] __read_only = {
67989 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
67990 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
67991 };
67992
67993 pgprot_t vm_get_page_prot(unsigned long vm_flags)
67994 {
67995- return __pgprot(pgprot_val(protection_map[vm_flags &
67996+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
67997 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
67998 pgprot_val(arch_vm_get_page_prot(vm_flags)));
67999+
68000+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68001+ if (!nx_enabled &&
68002+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68003+ (vm_flags & (VM_READ | VM_WRITE)))
68004+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68005+#endif
68006+
68007+ return prot;
68008 }
68009 EXPORT_SYMBOL(vm_get_page_prot);
68010
68011 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68012 int sysctl_overcommit_ratio = 50; /* default is 50% */
68013 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68014+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68015 struct percpu_counter vm_committed_as;
68016
68017 /*
68018@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68019 struct vm_area_struct *next = vma->vm_next;
68020
68021 might_sleep();
68022+ BUG_ON(vma->vm_mirror);
68023 if (vma->vm_ops && vma->vm_ops->close)
68024 vma->vm_ops->close(vma);
68025 if (vma->vm_file) {
68026@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68027 * not page aligned -Ram Gupta
68028 */
68029 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68030+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68031 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68032 (mm->end_data - mm->start_data) > rlim)
68033 goto out;
68034@@ -704,6 +726,12 @@ static int
68035 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68036 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68037 {
68038+
68039+#ifdef CONFIG_PAX_SEGMEXEC
68040+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68041+ return 0;
68042+#endif
68043+
68044 if (is_mergeable_vma(vma, file, vm_flags) &&
68045 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68046 if (vma->vm_pgoff == vm_pgoff)
68047@@ -723,6 +751,12 @@ static int
68048 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68049 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68050 {
68051+
68052+#ifdef CONFIG_PAX_SEGMEXEC
68053+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68054+ return 0;
68055+#endif
68056+
68057 if (is_mergeable_vma(vma, file, vm_flags) &&
68058 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68059 pgoff_t vm_pglen;
68060@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68061 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68062 struct vm_area_struct *prev, unsigned long addr,
68063 unsigned long end, unsigned long vm_flags,
68064- struct anon_vma *anon_vma, struct file *file,
68065+ struct anon_vma *anon_vma, struct file *file,
68066 pgoff_t pgoff, struct mempolicy *policy)
68067 {
68068 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68069 struct vm_area_struct *area, *next;
68070
68071+#ifdef CONFIG_PAX_SEGMEXEC
68072+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68073+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68074+
68075+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68076+#endif
68077+
68078 /*
68079 * We later require that vma->vm_flags == vm_flags,
68080 * so this tests vma->vm_flags & VM_SPECIAL, too.
68081@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68082 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68083 next = next->vm_next;
68084
68085+#ifdef CONFIG_PAX_SEGMEXEC
68086+ if (prev)
68087+ prev_m = pax_find_mirror_vma(prev);
68088+ if (area)
68089+ area_m = pax_find_mirror_vma(area);
68090+ if (next)
68091+ next_m = pax_find_mirror_vma(next);
68092+#endif
68093+
68094 /*
68095 * Can it merge with the predecessor?
68096 */
68097@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68098 /* cases 1, 6 */
68099 vma_adjust(prev, prev->vm_start,
68100 next->vm_end, prev->vm_pgoff, NULL);
68101- } else /* cases 2, 5, 7 */
68102+
68103+#ifdef CONFIG_PAX_SEGMEXEC
68104+ if (prev_m)
68105+ vma_adjust(prev_m, prev_m->vm_start,
68106+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68107+#endif
68108+
68109+ } else { /* cases 2, 5, 7 */
68110 vma_adjust(prev, prev->vm_start,
68111 end, prev->vm_pgoff, NULL);
68112+
68113+#ifdef CONFIG_PAX_SEGMEXEC
68114+ if (prev_m)
68115+ vma_adjust(prev_m, prev_m->vm_start,
68116+ end_m, prev_m->vm_pgoff, NULL);
68117+#endif
68118+
68119+ }
68120 return prev;
68121 }
68122
68123@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68124 mpol_equal(policy, vma_policy(next)) &&
68125 can_vma_merge_before(next, vm_flags,
68126 anon_vma, file, pgoff+pglen)) {
68127- if (prev && addr < prev->vm_end) /* case 4 */
68128+ if (prev && addr < prev->vm_end) { /* case 4 */
68129 vma_adjust(prev, prev->vm_start,
68130 addr, prev->vm_pgoff, NULL);
68131- else /* cases 3, 8 */
68132+
68133+#ifdef CONFIG_PAX_SEGMEXEC
68134+ if (prev_m)
68135+ vma_adjust(prev_m, prev_m->vm_start,
68136+ addr_m, prev_m->vm_pgoff, NULL);
68137+#endif
68138+
68139+ } else { /* cases 3, 8 */
68140 vma_adjust(area, addr, next->vm_end,
68141 next->vm_pgoff - pglen, NULL);
68142+
68143+#ifdef CONFIG_PAX_SEGMEXEC
68144+ if (area_m)
68145+ vma_adjust(area_m, addr_m, next_m->vm_end,
68146+ next_m->vm_pgoff - pglen, NULL);
68147+#endif
68148+
68149+ }
68150 return area;
68151 }
68152
68153@@ -898,14 +978,11 @@ none:
68154 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68155 struct file *file, long pages)
68156 {
68157- const unsigned long stack_flags
68158- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68159-
68160 if (file) {
68161 mm->shared_vm += pages;
68162 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68163 mm->exec_vm += pages;
68164- } else if (flags & stack_flags)
68165+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68166 mm->stack_vm += pages;
68167 if (flags & (VM_RESERVED|VM_IO))
68168 mm->reserved_vm += pages;
68169@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68170 * (the exception is when the underlying filesystem is noexec
68171 * mounted, in which case we dont add PROT_EXEC.)
68172 */
68173- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68174+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68175 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68176 prot |= PROT_EXEC;
68177
68178@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68179 /* Obtain the address to map to. we verify (or select) it and ensure
68180 * that it represents a valid section of the address space.
68181 */
68182- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68183+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68184 if (addr & ~PAGE_MASK)
68185 return addr;
68186
68187@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68188 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68189 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68190
68191+#ifdef CONFIG_PAX_MPROTECT
68192+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68193+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68194+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68195+ gr_log_rwxmmap(file);
68196+
68197+#ifdef CONFIG_PAX_EMUPLT
68198+ vm_flags &= ~VM_EXEC;
68199+#else
68200+ return -EPERM;
68201+#endif
68202+
68203+ }
68204+
68205+ if (!(vm_flags & VM_EXEC))
68206+ vm_flags &= ~VM_MAYEXEC;
68207+#else
68208+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68209+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68210+#endif
68211+ else
68212+ vm_flags &= ~VM_MAYWRITE;
68213+ }
68214+#endif
68215+
68216+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68217+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68218+ vm_flags &= ~VM_PAGEEXEC;
68219+#endif
68220+
68221 if (flags & MAP_LOCKED)
68222 if (!can_do_mlock())
68223 return -EPERM;
68224@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68225 locked += mm->locked_vm;
68226 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68227 lock_limit >>= PAGE_SHIFT;
68228+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68229 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68230 return -EAGAIN;
68231 }
68232@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68233 if (error)
68234 return error;
68235
68236+ if (!gr_acl_handle_mmap(file, prot))
68237+ return -EACCES;
68238+
68239 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68240 }
68241 EXPORT_SYMBOL(do_mmap_pgoff);
68242@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68243 */
68244 int vma_wants_writenotify(struct vm_area_struct *vma)
68245 {
68246- unsigned int vm_flags = vma->vm_flags;
68247+ unsigned long vm_flags = vma->vm_flags;
68248
68249 /* If it was private or non-writable, the write bit is already clear */
68250- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68251+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68252 return 0;
68253
68254 /* The backer wishes to know when pages are first written to? */
68255@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68256 unsigned long charged = 0;
68257 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68258
68259+#ifdef CONFIG_PAX_SEGMEXEC
68260+ struct vm_area_struct *vma_m = NULL;
68261+#endif
68262+
68263+ /*
68264+ * mm->mmap_sem is required to protect against another thread
68265+ * changing the mappings in case we sleep.
68266+ */
68267+ verify_mm_writelocked(mm);
68268+
68269 /* Clear old maps */
68270 error = -ENOMEM;
68271-munmap_back:
68272 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68273 if (vma && vma->vm_start < addr + len) {
68274 if (do_munmap(mm, addr, len))
68275 return -ENOMEM;
68276- goto munmap_back;
68277+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68278+ BUG_ON(vma && vma->vm_start < addr + len);
68279 }
68280
68281 /* Check against address space limit. */
68282@@ -1173,6 +1294,16 @@ munmap_back:
68283 goto unacct_error;
68284 }
68285
68286+#ifdef CONFIG_PAX_SEGMEXEC
68287+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68288+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68289+ if (!vma_m) {
68290+ error = -ENOMEM;
68291+ goto free_vma;
68292+ }
68293+ }
68294+#endif
68295+
68296 vma->vm_mm = mm;
68297 vma->vm_start = addr;
68298 vma->vm_end = addr + len;
68299@@ -1195,6 +1326,19 @@ munmap_back:
68300 error = file->f_op->mmap(file, vma);
68301 if (error)
68302 goto unmap_and_free_vma;
68303+
68304+#ifdef CONFIG_PAX_SEGMEXEC
68305+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68306+ added_exe_file_vma(mm);
68307+#endif
68308+
68309+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68310+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68311+ vma->vm_flags |= VM_PAGEEXEC;
68312+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68313+ }
68314+#endif
68315+
68316 if (vm_flags & VM_EXECUTABLE)
68317 added_exe_file_vma(mm);
68318
68319@@ -1218,6 +1362,11 @@ munmap_back:
68320 vma_link(mm, vma, prev, rb_link, rb_parent);
68321 file = vma->vm_file;
68322
68323+#ifdef CONFIG_PAX_SEGMEXEC
68324+ if (vma_m)
68325+ pax_mirror_vma(vma_m, vma);
68326+#endif
68327+
68328 /* Once vma denies write, undo our temporary denial count */
68329 if (correct_wcount)
68330 atomic_inc(&inode->i_writecount);
68331@@ -1226,6 +1375,7 @@ out:
68332
68333 mm->total_vm += len >> PAGE_SHIFT;
68334 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68335+ track_exec_limit(mm, addr, addr + len, vm_flags);
68336 if (vm_flags & VM_LOCKED) {
68337 /*
68338 * makes pages present; downgrades, drops, reacquires mmap_sem
68339@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68340 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68341 charged = 0;
68342 free_vma:
68343+
68344+#ifdef CONFIG_PAX_SEGMEXEC
68345+ if (vma_m)
68346+ kmem_cache_free(vm_area_cachep, vma_m);
68347+#endif
68348+
68349 kmem_cache_free(vm_area_cachep, vma);
68350 unacct_error:
68351 if (charged)
68352@@ -1255,6 +1411,44 @@ unacct_error:
68353 return error;
68354 }
68355
68356+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68357+{
68358+ if (!vma) {
68359+#ifdef CONFIG_STACK_GROWSUP
68360+ if (addr > sysctl_heap_stack_gap)
68361+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68362+ else
68363+ vma = find_vma(current->mm, 0);
68364+ if (vma && (vma->vm_flags & VM_GROWSUP))
68365+ return false;
68366+#endif
68367+ return true;
68368+ }
68369+
68370+ if (addr + len > vma->vm_start)
68371+ return false;
68372+
68373+ if (vma->vm_flags & VM_GROWSDOWN)
68374+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68375+#ifdef CONFIG_STACK_GROWSUP
68376+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68377+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68378+#endif
68379+
68380+ return true;
68381+}
68382+
68383+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68384+{
68385+ if (vma->vm_start < len)
68386+ return -ENOMEM;
68387+ if (!(vma->vm_flags & VM_GROWSDOWN))
68388+ return vma->vm_start - len;
68389+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68390+ return vma->vm_start - len - sysctl_heap_stack_gap;
68391+ return -ENOMEM;
68392+}
68393+
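/*
 * Illustrative sketch, not part of the patch: a user-space model of the
 * check_heap_stack_gap() logic added above, covering only the common
 * VM_GROWSDOWN (downward-growing stack) case.  "struct vma" and GAP are
 * local stand-ins for vm_area_struct and sysctl_heap_stack_gap.
 */
#include <assert.h>
#include <stdbool.h>

#define VM_GROWSDOWN 0x1UL
#define GAP 0x10000UL	/* assumed 64 KiB heap/stack gap */

struct vma { unsigned long vm_start, vm_end, vm_flags; };

/* a candidate range [addr, addr+len) is acceptable if it does not run into
 * the next vma and keeps at least GAP bytes below a growing stack */
static bool gap_ok(const struct vma *vma, unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;
	if (addr + len > vma->vm_start)
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)
		return GAP <= vma->vm_start - addr - len;
	return true;
}

int main(void)
{
	struct vma stack = { 0xbff00000UL, 0xc0000000UL, VM_GROWSDOWN };

	/* leaves more than GAP below the stack: accepted */
	assert(gap_ok(&stack, stack.vm_start - 0x20000UL, 0x1000UL));
	/* ends flush against vm_start, no room for the gap: rejected */
	assert(!gap_ok(&stack, stack.vm_start - 0x1000UL, 0x1000UL));
	return 0;
}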
68394 /* Get an address range which is currently unmapped.
68395 * For shmat() with addr=0.
68396 *
68397@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68398 if (flags & MAP_FIXED)
68399 return addr;
68400
68401+#ifdef CONFIG_PAX_RANDMMAP
68402+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68403+#endif
68404+
68405 if (addr) {
68406 addr = PAGE_ALIGN(addr);
68407- vma = find_vma(mm, addr);
68408- if (TASK_SIZE - len >= addr &&
68409- (!vma || addr + len <= vma->vm_start))
68410- return addr;
68411+ if (TASK_SIZE - len >= addr) {
68412+ vma = find_vma(mm, addr);
68413+ if (check_heap_stack_gap(vma, addr, len))
68414+ return addr;
68415+ }
68416 }
68417 if (len > mm->cached_hole_size) {
68418- start_addr = addr = mm->free_area_cache;
68419+ start_addr = addr = mm->free_area_cache;
68420 } else {
68421- start_addr = addr = TASK_UNMAPPED_BASE;
68422- mm->cached_hole_size = 0;
68423+ start_addr = addr = mm->mmap_base;
68424+ mm->cached_hole_size = 0;
68425 }
68426
68427 full_search:
68428@@ -1303,34 +1502,40 @@ full_search:
68429 * Start a new search - just in case we missed
68430 * some holes.
68431 */
68432- if (start_addr != TASK_UNMAPPED_BASE) {
68433- addr = TASK_UNMAPPED_BASE;
68434- start_addr = addr;
68435+ if (start_addr != mm->mmap_base) {
68436+ start_addr = addr = mm->mmap_base;
68437 mm->cached_hole_size = 0;
68438 goto full_search;
68439 }
68440 return -ENOMEM;
68441 }
68442- if (!vma || addr + len <= vma->vm_start) {
68443- /*
68444- * Remember the place where we stopped the search:
68445- */
68446- mm->free_area_cache = addr + len;
68447- return addr;
68448- }
68449+ if (check_heap_stack_gap(vma, addr, len))
68450+ break;
68451 if (addr + mm->cached_hole_size < vma->vm_start)
68452 mm->cached_hole_size = vma->vm_start - addr;
68453 addr = vma->vm_end;
68454 }
68455+
68456+ /*
68457+ * Remember the place where we stopped the search:
68458+ */
68459+ mm->free_area_cache = addr + len;
68460+ return addr;
68461 }
68462 #endif
68463
68464 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68465 {
68466+
68467+#ifdef CONFIG_PAX_SEGMEXEC
68468+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68469+ return;
68470+#endif
68471+
68472 /*
68473 * Is this a new hole at the lowest possible address?
68474 */
68475- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68476+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68477 mm->free_area_cache = addr;
68478 mm->cached_hole_size = ~0UL;
68479 }
68480@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68481 {
68482 struct vm_area_struct *vma;
68483 struct mm_struct *mm = current->mm;
68484- unsigned long addr = addr0;
68485+ unsigned long base = mm->mmap_base, addr = addr0;
68486
68487 /* requested length too big for entire address space */
68488 if (len > TASK_SIZE)
68489@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68490 if (flags & MAP_FIXED)
68491 return addr;
68492
68493+#ifdef CONFIG_PAX_RANDMMAP
68494+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68495+#endif
68496+
68497 /* requesting a specific address */
68498 if (addr) {
68499 addr = PAGE_ALIGN(addr);
68500- vma = find_vma(mm, addr);
68501- if (TASK_SIZE - len >= addr &&
68502- (!vma || addr + len <= vma->vm_start))
68503- return addr;
68504+ if (TASK_SIZE - len >= addr) {
68505+ vma = find_vma(mm, addr);
68506+ if (check_heap_stack_gap(vma, addr, len))
68507+ return addr;
68508+ }
68509 }
68510
68511 /* check if free_area_cache is useful for us */
68512@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68513 /* make sure it can fit in the remaining address space */
68514 if (addr > len) {
68515 vma = find_vma(mm, addr-len);
68516- if (!vma || addr <= vma->vm_start)
68517+ if (check_heap_stack_gap(vma, addr - len, len))
68518 /* remember the address as a hint for next time */
68519 return (mm->free_area_cache = addr-len);
68520 }
68521@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68522 * return with success:
68523 */
68524 vma = find_vma(mm, addr);
68525- if (!vma || addr+len <= vma->vm_start)
68526+ if (check_heap_stack_gap(vma, addr, len))
68527 /* remember the address as a hint for next time */
68528 return (mm->free_area_cache = addr);
68529
68530@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68531 mm->cached_hole_size = vma->vm_start - addr;
68532
68533 /* try just below the current vma->vm_start */
68534- addr = vma->vm_start-len;
68535- } while (len < vma->vm_start);
68536+ addr = skip_heap_stack_gap(vma, len);
68537+ } while (!IS_ERR_VALUE(addr));
68538
68539 bottomup:
68540 /*
68541@@ -1414,13 +1624,21 @@ bottomup:
68542 * can happen with large stack limits and large mmap()
68543 * allocations.
68544 */
68545+ mm->mmap_base = TASK_UNMAPPED_BASE;
68546+
68547+#ifdef CONFIG_PAX_RANDMMAP
68548+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68549+ mm->mmap_base += mm->delta_mmap;
68550+#endif
68551+
68552+ mm->free_area_cache = mm->mmap_base;
68553 mm->cached_hole_size = ~0UL;
68554- mm->free_area_cache = TASK_UNMAPPED_BASE;
68555 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68556 /*
68557 * Restore the topdown base:
68558 */
68559- mm->free_area_cache = mm->mmap_base;
68560+ mm->mmap_base = base;
68561+ mm->free_area_cache = base;
68562 mm->cached_hole_size = ~0UL;
68563
68564 return addr;
68565@@ -1429,6 +1647,12 @@ bottomup:
68566
68567 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68568 {
68569+
68570+#ifdef CONFIG_PAX_SEGMEXEC
68571+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68572+ return;
68573+#endif
68574+
68575 /*
68576 * Is this a new hole at the highest possible address?
68577 */
68578@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68579 mm->free_area_cache = addr;
68580
68581 /* dont allow allocations above current base */
68582- if (mm->free_area_cache > mm->mmap_base)
68583+ if (mm->free_area_cache > mm->mmap_base) {
68584 mm->free_area_cache = mm->mmap_base;
68585+ mm->cached_hole_size = ~0UL;
68586+ }
68587 }
68588
68589 unsigned long
68590@@ -1545,6 +1771,27 @@ out:
68591 return prev ? prev->vm_next : vma;
68592 }
68593
68594+#ifdef CONFIG_PAX_SEGMEXEC
68595+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
68596+{
68597+ struct vm_area_struct *vma_m;
68598+
68599+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
68600+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
68601+ BUG_ON(vma->vm_mirror);
68602+ return NULL;
68603+ }
68604+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
68605+ vma_m = vma->vm_mirror;
68606+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
68607+ BUG_ON(vma->vm_file != vma_m->vm_file);
68608+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
68609+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
68610+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
68611+ return vma_m;
68612+}
68613+#endif
68614+
68615 /*
68616 * Verify that the stack growth is acceptable and
68617 * update accounting. This is shared with both the
68618@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
68619 return -ENOMEM;
68620
68621 /* Stack limit test */
68622+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
68623 if (size > rlim[RLIMIT_STACK].rlim_cur)
68624 return -ENOMEM;
68625
68626@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
68627 unsigned long limit;
68628 locked = mm->locked_vm + grow;
68629 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
68630+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68631 if (locked > limit && !capable(CAP_IPC_LOCK))
68632 return -ENOMEM;
68633 }
68634@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
68635 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
68636 * vma is the last one with address > vma->vm_end. Have to extend vma.
68637 */
68638+#ifndef CONFIG_IA64
68639+static
68640+#endif
68641 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
68642 {
68643 int error;
68644+ bool locknext;
68645
68646 if (!(vma->vm_flags & VM_GROWSUP))
68647 return -EFAULT;
68648
68649+ /* Also guard against wrapping around to address 0. */
68650+ if (address < PAGE_ALIGN(address+1))
68651+ address = PAGE_ALIGN(address+1);
68652+ else
68653+ return -ENOMEM;
68654+
68655 /*
68656 * We must make sure the anon_vma is allocated
68657 * so that the anon_vma locking is not a noop.
68658 */
68659 if (unlikely(anon_vma_prepare(vma)))
68660 return -ENOMEM;
68661+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
68662+ if (locknext && anon_vma_prepare(vma->vm_next))
68663+ return -ENOMEM;
68664 anon_vma_lock(vma);
68665+ if (locknext)
68666+ anon_vma_lock(vma->vm_next);
68667
68668 /*
68669 * vma->vm_start/vm_end cannot change under us because the caller
68670 * is required to hold the mmap_sem in read mode. We need the
68671- * anon_vma lock to serialize against concurrent expand_stacks.
68672- * Also guard against wrapping around to address 0.
68673+ * anon_vma locks to serialize against concurrent expand_stacks
68674+ * and expand_upwards.
68675 */
68676- if (address < PAGE_ALIGN(address+4))
68677- address = PAGE_ALIGN(address+4);
68678- else {
68679- anon_vma_unlock(vma);
68680- return -ENOMEM;
68681- }
68682 error = 0;
68683
68684 /* Somebody else might have raced and expanded it already */
68685- if (address > vma->vm_end) {
68686+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
68687+ error = -ENOMEM;
68688+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
68689 unsigned long size, grow;
68690
68691 size = address - vma->vm_start;
68692@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
68693 if (!error)
68694 vma->vm_end = address;
68695 }
68696+ if (locknext)
68697+ anon_vma_unlock(vma->vm_next);
68698 anon_vma_unlock(vma);
68699 return error;
68700 }
68701@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
68702 unsigned long address)
68703 {
68704 int error;
68705+ bool lockprev = false;
68706+ struct vm_area_struct *prev;
68707
68708 /*
68709 * We must make sure the anon_vma is allocated
68710@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
68711 if (error)
68712 return error;
68713
68714+ prev = vma->vm_prev;
68715+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
68716+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
68717+#endif
68718+ if (lockprev && anon_vma_prepare(prev))
68719+ return -ENOMEM;
68720+ if (lockprev)
68721+ anon_vma_lock(prev);
68722+
68723 anon_vma_lock(vma);
68724
68725 /*
68726@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
68727 */
68728
68729 /* Somebody else might have raced and expanded it already */
68730- if (address < vma->vm_start) {
68731+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
68732+ error = -ENOMEM;
68733+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
68734 unsigned long size, grow;
68735
68736+#ifdef CONFIG_PAX_SEGMEXEC
68737+ struct vm_area_struct *vma_m;
68738+
68739+ vma_m = pax_find_mirror_vma(vma);
68740+#endif
68741+
68742 size = vma->vm_end - address;
68743 grow = (vma->vm_start - address) >> PAGE_SHIFT;
68744
68745@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
68746 if (!error) {
68747 vma->vm_start = address;
68748 vma->vm_pgoff -= grow;
68749+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
68750+
68751+#ifdef CONFIG_PAX_SEGMEXEC
68752+ if (vma_m) {
68753+ vma_m->vm_start -= grow << PAGE_SHIFT;
68754+ vma_m->vm_pgoff -= grow;
68755+ }
68756+#endif
68757+
68758 }
68759 }
68760 anon_vma_unlock(vma);
68761+ if (lockprev)
68762+ anon_vma_unlock(prev);
68763 return error;
68764 }
68765
68766@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
68767 do {
68768 long nrpages = vma_pages(vma);
68769
68770+#ifdef CONFIG_PAX_SEGMEXEC
68771+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
68772+ vma = remove_vma(vma);
68773+ continue;
68774+ }
68775+#endif
68776+
68777 mm->total_vm -= nrpages;
68778 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
68779 vma = remove_vma(vma);
68780@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
68781 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
68782 vma->vm_prev = NULL;
68783 do {
68784+
68785+#ifdef CONFIG_PAX_SEGMEXEC
68786+ if (vma->vm_mirror) {
68787+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
68788+ vma->vm_mirror->vm_mirror = NULL;
68789+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
68790+ vma->vm_mirror = NULL;
68791+ }
68792+#endif
68793+
68794 rb_erase(&vma->vm_rb, &mm->mm_rb);
68795 mm->map_count--;
68796 tail_vma = vma;
68797@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
68798 struct mempolicy *pol;
68799 struct vm_area_struct *new;
68800
68801+#ifdef CONFIG_PAX_SEGMEXEC
68802+ struct vm_area_struct *vma_m, *new_m = NULL;
68803+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
68804+#endif
68805+
68806 if (is_vm_hugetlb_page(vma) && (addr &
68807 ~(huge_page_mask(hstate_vma(vma)))))
68808 return -EINVAL;
68809
68810+#ifdef CONFIG_PAX_SEGMEXEC
68811+ vma_m = pax_find_mirror_vma(vma);
68812+
68813+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68814+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
68815+ if (mm->map_count >= sysctl_max_map_count-1)
68816+ return -ENOMEM;
68817+ } else
68818+#endif
68819+
68820 if (mm->map_count >= sysctl_max_map_count)
68821 return -ENOMEM;
68822
68823@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
68824 if (!new)
68825 return -ENOMEM;
68826
68827+#ifdef CONFIG_PAX_SEGMEXEC
68828+ if (vma_m) {
68829+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
68830+ if (!new_m) {
68831+ kmem_cache_free(vm_area_cachep, new);
68832+ return -ENOMEM;
68833+ }
68834+ }
68835+#endif
68836+
68837 /* most fields are the same, copy all, and then fixup */
68838 *new = *vma;
68839
68840@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
68841 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
68842 }
68843
68844+#ifdef CONFIG_PAX_SEGMEXEC
68845+ if (vma_m) {
68846+ *new_m = *vma_m;
68847+ new_m->vm_mirror = new;
68848+ new->vm_mirror = new_m;
68849+
68850+ if (new_below)
68851+ new_m->vm_end = addr_m;
68852+ else {
68853+ new_m->vm_start = addr_m;
68854+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
68855+ }
68856+ }
68857+#endif
68858+
68859 pol = mpol_dup(vma_policy(vma));
68860 if (IS_ERR(pol)) {
68861+
68862+#ifdef CONFIG_PAX_SEGMEXEC
68863+ if (new_m)
68864+ kmem_cache_free(vm_area_cachep, new_m);
68865+#endif
68866+
68867 kmem_cache_free(vm_area_cachep, new);
68868 return PTR_ERR(pol);
68869 }
68870@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
68871 else
68872 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
68873
68874+#ifdef CONFIG_PAX_SEGMEXEC
68875+ if (vma_m) {
68876+ mpol_get(pol);
68877+ vma_set_policy(new_m, pol);
68878+
68879+ if (new_m->vm_file) {
68880+ get_file(new_m->vm_file);
68881+ if (vma_m->vm_flags & VM_EXECUTABLE)
68882+ added_exe_file_vma(mm);
68883+ }
68884+
68885+ if (new_m->vm_ops && new_m->vm_ops->open)
68886+ new_m->vm_ops->open(new_m);
68887+
68888+ if (new_below)
68889+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
68890+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
68891+ else
68892+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
68893+ }
68894+#endif
68895+
68896 return 0;
68897 }
68898
68899@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
68900 * work. This now handles partial unmappings.
68901 * Jeremy Fitzhardinge <jeremy@goop.org>
68902 */
68903+#ifdef CONFIG_PAX_SEGMEXEC
68904+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68905+{
68906+ int ret = __do_munmap(mm, start, len);
68907+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
68908+ return ret;
68909+
68910+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
68911+}
68912+
68913+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68914+#else
68915 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
68916+#endif
68917 {
68918 unsigned long end;
68919 struct vm_area_struct *vma, *prev, *last;
68920
68921+ /*
68922+ * mm->mmap_sem is required to protect against another thread
68923+ * changing the mappings in case we sleep.
68924+ */
68925+ verify_mm_writelocked(mm);
68926+
68927 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
68928 return -EINVAL;
68929
68930@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
68931 /* Fix up all other VM information */
68932 remove_vma_list(mm, vma);
68933
68934+ track_exec_limit(mm, start, end, 0UL);
68935+
68936 return 0;
68937 }
68938
68939@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
68940
68941 profile_munmap(addr);
68942
68943+#ifdef CONFIG_PAX_SEGMEXEC
68944+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
68945+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
68946+ return -EINVAL;
68947+#endif
68948+
68949 down_write(&mm->mmap_sem);
68950 ret = do_munmap(mm, addr, len);
68951 up_write(&mm->mmap_sem);
68952 return ret;
68953 }
68954
68955-static inline void verify_mm_writelocked(struct mm_struct *mm)
68956-{
68957-#ifdef CONFIG_DEBUG_VM
68958- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68959- WARN_ON(1);
68960- up_read(&mm->mmap_sem);
68961- }
68962-#endif
68963-}
68964-
68965 /*
68966 * this is really a simplified "do_mmap". it only handles
68967 * anonymous maps. eventually we may be able to do some
68968@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
68969 struct rb_node ** rb_link, * rb_parent;
68970 pgoff_t pgoff = addr >> PAGE_SHIFT;
68971 int error;
68972+ unsigned long charged;
68973
68974 len = PAGE_ALIGN(len);
68975 if (!len)
68976@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
68977
68978 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
68979
68980+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68981+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68982+ flags &= ~VM_EXEC;
68983+
68984+#ifdef CONFIG_PAX_MPROTECT
68985+ if (mm->pax_flags & MF_PAX_MPROTECT)
68986+ flags &= ~VM_MAYEXEC;
68987+#endif
68988+
68989+ }
68990+#endif
68991+
68992 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
68993 if (error & ~PAGE_MASK)
68994 return error;
68995
68996+ charged = len >> PAGE_SHIFT;
68997+
68998 /*
68999 * mlock MCL_FUTURE?
69000 */
69001 if (mm->def_flags & VM_LOCKED) {
69002 unsigned long locked, lock_limit;
69003- locked = len >> PAGE_SHIFT;
69004+ locked = charged;
69005 locked += mm->locked_vm;
69006 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69007 lock_limit >>= PAGE_SHIFT;
69008@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69009 /*
69010 * Clear old maps. this also does some error checking for us
69011 */
69012- munmap_back:
69013 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69014 if (vma && vma->vm_start < addr + len) {
69015 if (do_munmap(mm, addr, len))
69016 return -ENOMEM;
69017- goto munmap_back;
69018+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69019+ BUG_ON(vma && vma->vm_start < addr + len);
69020 }
69021
69022 /* Check against address space limits *after* clearing old maps... */
69023- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69024+ if (!may_expand_vm(mm, charged))
69025 return -ENOMEM;
69026
69027 if (mm->map_count > sysctl_max_map_count)
69028 return -ENOMEM;
69029
69030- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69031+ if (security_vm_enough_memory(charged))
69032 return -ENOMEM;
69033
69034 /* Can we just expand an old private anonymous mapping? */
69035@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69036 */
69037 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69038 if (!vma) {
69039- vm_unacct_memory(len >> PAGE_SHIFT);
69040+ vm_unacct_memory(charged);
69041 return -ENOMEM;
69042 }
69043
69044@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69045 vma->vm_page_prot = vm_get_page_prot(flags);
69046 vma_link(mm, vma, prev, rb_link, rb_parent);
69047 out:
69048- mm->total_vm += len >> PAGE_SHIFT;
69049+ mm->total_vm += charged;
69050 if (flags & VM_LOCKED) {
69051 if (!mlock_vma_pages_range(vma, addr, addr + len))
69052- mm->locked_vm += (len >> PAGE_SHIFT);
69053+ mm->locked_vm += charged;
69054 }
69055+ track_exec_limit(mm, addr, addr + len, flags);
69056 return addr;
69057 }
69058
69059@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69060 * Walk the list again, actually closing and freeing it,
69061 * with preemption enabled, without holding any MM locks.
69062 */
69063- while (vma)
69064+ while (vma) {
69065+ vma->vm_mirror = NULL;
69066 vma = remove_vma(vma);
69067+ }
69068
69069 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69070 }
69071@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69072 struct vm_area_struct * __vma, * prev;
69073 struct rb_node ** rb_link, * rb_parent;
69074
69075+#ifdef CONFIG_PAX_SEGMEXEC
69076+ struct vm_area_struct *vma_m = NULL;
69077+#endif
69078+
69079 /*
69080 * The vm_pgoff of a purely anonymous vma should be irrelevant
69081 * until its first write fault, when page's anon_vma and index
69082@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69083 if ((vma->vm_flags & VM_ACCOUNT) &&
69084 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69085 return -ENOMEM;
69086+
69087+#ifdef CONFIG_PAX_SEGMEXEC
69088+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69089+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69090+ if (!vma_m)
69091+ return -ENOMEM;
69092+ }
69093+#endif
69094+
69095 vma_link(mm, vma, prev, rb_link, rb_parent);
69096+
69097+#ifdef CONFIG_PAX_SEGMEXEC
69098+ if (vma_m)
69099+ pax_mirror_vma(vma_m, vma);
69100+#endif
69101+
69102 return 0;
69103 }
69104
69105@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69106 struct rb_node **rb_link, *rb_parent;
69107 struct mempolicy *pol;
69108
69109+ BUG_ON(vma->vm_mirror);
69110+
69111 /*
69112 * If anonymous vma has not yet been faulted, update new pgoff
69113 * to match new location, to increase its chance of merging.
69114@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69115 return new_vma;
69116 }
69117
69118+#ifdef CONFIG_PAX_SEGMEXEC
69119+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69120+{
69121+ struct vm_area_struct *prev_m;
69122+ struct rb_node **rb_link_m, *rb_parent_m;
69123+ struct mempolicy *pol_m;
69124+
69125+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69126+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69127+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69128+ *vma_m = *vma;
69129+ pol_m = vma_policy(vma_m);
69130+ mpol_get(pol_m);
69131+ vma_set_policy(vma_m, pol_m);
69132+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69133+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69134+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69135+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69136+ if (vma_m->vm_file)
69137+ get_file(vma_m->vm_file);
69138+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69139+ vma_m->vm_ops->open(vma_m);
69140+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69141+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69142+ vma_m->vm_mirror = vma;
69143+ vma->vm_mirror = vma_m;
69144+}
69145+#endif
69146+
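/*
 * Illustrative sketch, not part of the patch: the address arithmetic behind
 * pax_mirror_vma() above.  An executable mapping gets a mirror shifted up by
 * SEGMEXEC_TASK_SIZE, with write permission (and a few other flags) stripped
 * by the caller.  The constant is assumed to be 1.5 GiB here, as on a typical
 * i386 3 GiB/1 GiB split; the real value comes from the architecture headers.
 */
#include <assert.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL

struct range { unsigned long start, end; };

static struct range mirror_of(struct range r)
{
	return (struct range){ r.start + SEGMEXEC_TASK_SIZE,
			       r.end + SEGMEXEC_TASK_SIZE };
}

int main(void)
{
	struct range text = { 0x08048000UL, 0x08049000UL };
	struct range m = mirror_of(text);

	/* same size, same relative layout, just moved into the upper segment */
	assert(m.end - m.start == text.end - text.start);
	assert(m.start == 0x68048000UL);
	return 0;
}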
69147 /*
69148 * Return true if the calling process may expand its vm space by the passed
69149 * number of pages
69150@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69151 unsigned long lim;
69152
69153 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69154-
69155+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69156 if (cur + npages > lim)
69157 return 0;
69158 return 1;
69159@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69160 vma->vm_start = addr;
69161 vma->vm_end = addr + len;
69162
69163+#ifdef CONFIG_PAX_MPROTECT
69164+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69165+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69166+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69167+ return -EPERM;
69168+ if (!(vm_flags & VM_EXEC))
69169+ vm_flags &= ~VM_MAYEXEC;
69170+#else
69171+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69172+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69173+#endif
69174+ else
69175+ vm_flags &= ~VM_MAYWRITE;
69176+ }
69177+#endif
69178+
69179 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69180 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69181
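Illustrative sketch, not part of the patch: the MPROTECT flag policy that the final mm/mmap.c hunk applies to install_special_mapping(), reduced to plain flag arithmetic. The VM_* values and the -1 error return are local stand-ins (the kernel uses its own definitions and returns -EPERM), and only the non-COMPAT branch is modelled.

#include <assert.h>

#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_MAYWRITE 0x20UL
#define VM_MAYEXEC  0x40UL

/* refuse writable+executable outright; otherwise drop the unused
 * "may become" bit so the mapping cannot be flipped later */
static long apply_mprotect_policy(unsigned long vm_flags)
{
	if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		return -1;
	if (!(vm_flags & VM_EXEC))
		vm_flags &= ~VM_MAYEXEC;
	else
		vm_flags &= ~VM_MAYWRITE;
	return (long)vm_flags;
}

int main(void)
{
	/* a writable and executable special mapping is rejected */
	assert(apply_mprotect_policy(VM_WRITE | VM_EXEC) == -1);
	/* a writable mapping loses the right to become executable later */
	assert(!(apply_mprotect_policy(VM_WRITE | VM_MAYEXEC) & VM_MAYEXEC));
	return 0;
}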
69182diff -urNp linux-2.6.32.46/mm/mprotect.c linux-2.6.32.46/mm/mprotect.c
69183--- linux-2.6.32.46/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69184+++ linux-2.6.32.46/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69185@@ -24,10 +24,16 @@
69186 #include <linux/mmu_notifier.h>
69187 #include <linux/migrate.h>
69188 #include <linux/perf_event.h>
69189+
69190+#ifdef CONFIG_PAX_MPROTECT
69191+#include <linux/elf.h>
69192+#endif
69193+
69194 #include <asm/uaccess.h>
69195 #include <asm/pgtable.h>
69196 #include <asm/cacheflush.h>
69197 #include <asm/tlbflush.h>
69198+#include <asm/mmu_context.h>
69199
69200 #ifndef pgprot_modify
69201 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69202@@ -132,6 +138,48 @@ static void change_protection(struct vm_
69203 flush_tlb_range(vma, start, end);
69204 }
69205
69206+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69207+/* called while holding the mmap semaphore for writing, except during stack expansion */
69208+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69209+{
69210+ unsigned long oldlimit, newlimit = 0UL;
69211+
69212+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69213+ return;
69214+
69215+ spin_lock(&mm->page_table_lock);
69216+ oldlimit = mm->context.user_cs_limit;
69217+ if ((prot & VM_EXEC) && oldlimit < end)
69218+ /* USER_CS limit moved up */
69219+ newlimit = end;
69220+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69221+ /* USER_CS limit moved down */
69222+ newlimit = start;
69223+
69224+ if (newlimit) {
69225+ mm->context.user_cs_limit = newlimit;
69226+
69227+#ifdef CONFIG_SMP
69228+ wmb();
69229+ cpus_clear(mm->context.cpu_user_cs_mask);
69230+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69231+#endif
69232+
69233+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69234+ }
69235+ spin_unlock(&mm->page_table_lock);
69236+ if (newlimit == end) {
69237+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69238+
69239+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69240+ if (is_vm_hugetlb_page(vma))
69241+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69242+ else
69243+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69244+ }
69245+}
69246+#endif
69247+
69248 int
69249 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69250 unsigned long start, unsigned long end, unsigned long newflags)
69251@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69252 int error;
69253 int dirty_accountable = 0;
69254
69255+#ifdef CONFIG_PAX_SEGMEXEC
69256+ struct vm_area_struct *vma_m = NULL;
69257+ unsigned long start_m, end_m;
69258+
69259+ start_m = start + SEGMEXEC_TASK_SIZE;
69260+ end_m = end + SEGMEXEC_TASK_SIZE;
69261+#endif
69262+
69263 if (newflags == oldflags) {
69264 *pprev = vma;
69265 return 0;
69266 }
69267
69268+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69269+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69270+
69271+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69272+ return -ENOMEM;
69273+
69274+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69275+ return -ENOMEM;
69276+ }
69277+
69278 /*
69279 * If we make a private mapping writable we increase our commit;
69280 * but (without finer accounting) cannot reduce our commit if we
69281@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69282 }
69283 }
69284
69285+#ifdef CONFIG_PAX_SEGMEXEC
69286+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69287+ if (start != vma->vm_start) {
69288+ error = split_vma(mm, vma, start, 1);
69289+ if (error)
69290+ goto fail;
69291+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69292+ *pprev = (*pprev)->vm_next;
69293+ }
69294+
69295+ if (end != vma->vm_end) {
69296+ error = split_vma(mm, vma, end, 0);
69297+ if (error)
69298+ goto fail;
69299+ }
69300+
69301+ if (pax_find_mirror_vma(vma)) {
69302+ error = __do_munmap(mm, start_m, end_m - start_m);
69303+ if (error)
69304+ goto fail;
69305+ } else {
69306+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69307+ if (!vma_m) {
69308+ error = -ENOMEM;
69309+ goto fail;
69310+ }
69311+ vma->vm_flags = newflags;
69312+ pax_mirror_vma(vma_m, vma);
69313+ }
69314+ }
69315+#endif
69316+
69317 /*
69318 * First try to merge with previous and/or next vma.
69319 */
69320@@ -195,9 +293,21 @@ success:
69321 * vm_flags and vm_page_prot are protected by the mmap_sem
69322 * held in write mode.
69323 */
69324+
69325+#ifdef CONFIG_PAX_SEGMEXEC
69326+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69327+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69328+#endif
69329+
69330 vma->vm_flags = newflags;
69331+
69332+#ifdef CONFIG_PAX_MPROTECT
69333+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69334+ mm->binfmt->handle_mprotect(vma, newflags);
69335+#endif
69336+
69337 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69338- vm_get_page_prot(newflags));
69339+ vm_get_page_prot(vma->vm_flags));
69340
69341 if (vma_wants_writenotify(vma)) {
69342 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69343@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69344 end = start + len;
69345 if (end <= start)
69346 return -ENOMEM;
69347+
69348+#ifdef CONFIG_PAX_SEGMEXEC
69349+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69350+ if (end > SEGMEXEC_TASK_SIZE)
69351+ return -EINVAL;
69352+ } else
69353+#endif
69354+
69355+ if (end > TASK_SIZE)
69356+ return -EINVAL;
69357+
69358 if (!arch_validate_prot(prot))
69359 return -EINVAL;
69360
69361@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69362 /*
69363 * Does the application expect PROT_READ to imply PROT_EXEC:
69364 */
69365- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69366+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69367 prot |= PROT_EXEC;
69368
69369 vm_flags = calc_vm_prot_bits(prot);
69370@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69371 if (start > vma->vm_start)
69372 prev = vma;
69373
69374+#ifdef CONFIG_PAX_MPROTECT
69375+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69376+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69377+#endif
69378+
69379 for (nstart = start ; ; ) {
69380 unsigned long newflags;
69381
69382@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69383
69384 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69385 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69386+ if (prot & (PROT_WRITE | PROT_EXEC))
69387+ gr_log_rwxmprotect(vma->vm_file);
69388+
69389+ error = -EACCES;
69390+ goto out;
69391+ }
69392+
69393+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69394 error = -EACCES;
69395 goto out;
69396 }
69397@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69398 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69399 if (error)
69400 goto out;
69401+
69402+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69403+
69404 nstart = tmp;
69405
69406 if (nstart < prev->vm_end)
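Illustrative sketch, not part of the patch: the limit-update rule used by track_exec_limit() in the mm/mprotect.c hunk above, modelled as a pure function. The real code also reloads the USER_CS descriptor and reprotects affected VMAs; only the arithmetic is shown, and the addresses in the test are arbitrary.

#include <assert.h>
#include <stdbool.h>

/* returns the new exec limit, or 0 when the limit does not move */
static unsigned long new_exec_limit(unsigned long oldlimit,
				    unsigned long start, unsigned long end,
				    bool exec)
{
	if (exec && oldlimit < end)
		return end;	/* limit moves up to cover the new exec range */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;	/* limit moves down past the now non-exec range */
	return 0;
}

int main(void)
{
	/* mapping an executable region above the old limit raises it */
	assert(new_exec_limit(0x08050000UL, 0x08060000UL, 0x08070000UL, true) == 0x08070000UL);
	/* dropping exec on the range containing the limit lowers it */
	assert(new_exec_limit(0x08070000UL, 0x08060000UL, 0x08080000UL, false) == 0x08060000UL);
	return 0;
}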
69407diff -urNp linux-2.6.32.46/mm/mremap.c linux-2.6.32.46/mm/mremap.c
69408--- linux-2.6.32.46/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69409+++ linux-2.6.32.46/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69410@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69411 continue;
69412 pte = ptep_clear_flush(vma, old_addr, old_pte);
69413 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69414+
69415+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69416+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69417+ pte = pte_exprotect(pte);
69418+#endif
69419+
69420 set_pte_at(mm, new_addr, new_pte, pte);
69421 }
69422
69423@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69424 if (is_vm_hugetlb_page(vma))
69425 goto Einval;
69426
69427+#ifdef CONFIG_PAX_SEGMEXEC
69428+ if (pax_find_mirror_vma(vma))
69429+ goto Einval;
69430+#endif
69431+
69432 /* We can't remap across vm area boundaries */
69433 if (old_len > vma->vm_end - addr)
69434 goto Efault;
69435@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69436 unsigned long ret = -EINVAL;
69437 unsigned long charged = 0;
69438 unsigned long map_flags;
69439+ unsigned long pax_task_size = TASK_SIZE;
69440
69441 if (new_addr & ~PAGE_MASK)
69442 goto out;
69443
69444- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69445+#ifdef CONFIG_PAX_SEGMEXEC
69446+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69447+ pax_task_size = SEGMEXEC_TASK_SIZE;
69448+#endif
69449+
69450+ pax_task_size -= PAGE_SIZE;
69451+
69452+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69453 goto out;
69454
69455 /* Check if the location we're moving into overlaps the
69456 * old location at all, and fail if it does.
69457 */
69458- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69459- goto out;
69460-
69461- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69462+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69463 goto out;
69464
69465 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69466@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69467 struct vm_area_struct *vma;
69468 unsigned long ret = -EINVAL;
69469 unsigned long charged = 0;
69470+ unsigned long pax_task_size = TASK_SIZE;
69471
69472 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69473 goto out;
69474@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69475 if (!new_len)
69476 goto out;
69477
69478+#ifdef CONFIG_PAX_SEGMEXEC
69479+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69480+ pax_task_size = SEGMEXEC_TASK_SIZE;
69481+#endif
69482+
69483+ pax_task_size -= PAGE_SIZE;
69484+
69485+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69486+ old_len > pax_task_size || addr > pax_task_size-old_len)
69487+ goto out;
69488+
69489 if (flags & MREMAP_FIXED) {
69490 if (flags & MREMAP_MAYMOVE)
69491 ret = mremap_to(addr, old_len, new_addr, new_len);
69492@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69493 addr + new_len);
69494 }
69495 ret = addr;
69496+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69497 goto out;
69498 }
69499 }
69500@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69501 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69502 if (ret)
69503 goto out;
69504+
69505+ map_flags = vma->vm_flags;
69506 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69507+ if (!(ret & ~PAGE_MASK)) {
69508+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69509+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69510+ }
69511 }
69512 out:
69513 if (ret & ~PAGE_MASK)
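Illustrative sketch, not part of the patch: the rewritten overlap check in mremap_to() above is the standard intersection test for half-open ranges; the single combined condition accepts exactly the same inputs as the two conditions it replaces.

#include <assert.h>
#include <stdbool.h>

static bool ranges_overlap(unsigned long a, unsigned long a_len,
			   unsigned long b, unsigned long b_len)
{
	return a + a_len > b && b + b_len > a;
}

int main(void)
{
	/* disjoint ranges do not overlap */
	assert(!ranges_overlap(0x1000, 0x1000, 0x3000, 0x1000));
	/* end-to-start adjacency is not an overlap for half-open ranges */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));
	/* a single byte of intersection is enough */
	assert(ranges_overlap(0x1000, 0x1001, 0x2000, 0x1000));
	return 0;
}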
69514diff -urNp linux-2.6.32.46/mm/nommu.c linux-2.6.32.46/mm/nommu.c
69515--- linux-2.6.32.46/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69516+++ linux-2.6.32.46/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69517@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69518 int sysctl_overcommit_ratio = 50; /* default is 50% */
69519 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69520 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69521-int heap_stack_gap = 0;
69522
69523 atomic_long_t mmap_pages_allocated;
69524
69525@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69526 EXPORT_SYMBOL(find_vma);
69527
69528 /*
69529- * find a VMA
69530- * - we don't extend stack VMAs under NOMMU conditions
69531- */
69532-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69533-{
69534- return find_vma(mm, addr);
69535-}
69536-
69537-/*
69538 * expand a stack to a given address
69539 * - not supported under NOMMU conditions
69540 */
69541diff -urNp linux-2.6.32.46/mm/page_alloc.c linux-2.6.32.46/mm/page_alloc.c
69542--- linux-2.6.32.46/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69543+++ linux-2.6.32.46/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69544@@ -289,7 +289,7 @@ out:
69545 * This usage means that zero-order pages may not be compound.
69546 */
69547
69548-static void free_compound_page(struct page *page)
69549+void free_compound_page(struct page *page)
69550 {
69551 __free_pages_ok(page, compound_order(page));
69552 }
69553@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69554 int bad = 0;
69555 int wasMlocked = __TestClearPageMlocked(page);
69556
69557+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69558+ unsigned long index = 1UL << order;
69559+#endif
69560+
69561 kmemcheck_free_shadow(page, order);
69562
69563 for (i = 0 ; i < (1 << order) ; ++i)
69564@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69565 debug_check_no_obj_freed(page_address(page),
69566 PAGE_SIZE << order);
69567 }
69568+
69569+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69570+ for (; index; --index)
69571+ sanitize_highpage(page + index - 1);
69572+#endif
69573+
69574 arch_free_page(page, order);
69575 kernel_map_pages(page, 1 << order, 0);
69576
69577@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69578 arch_alloc_page(page, order);
69579 kernel_map_pages(page, 1 << order, 1);
69580
69581+#ifndef CONFIG_PAX_MEMORY_SANITIZE
69582 if (gfp_flags & __GFP_ZERO)
69583 prep_zero_page(page, order, gfp_flags);
69584+#endif
69585
69586 if (order && (gfp_flags & __GFP_COMP))
69587 prep_compound_page(page, order);
69588@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
69589 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
69590 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
69591 }
69592+
69593+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69594+ sanitize_highpage(page);
69595+#endif
69596+
69597 arch_free_page(page, 0);
69598 kernel_map_pages(page, 1, 0);
69599
69600@@ -2179,6 +2196,8 @@ void show_free_areas(void)
69601 int cpu;
69602 struct zone *zone;
69603
69604+ pax_track_stack();
69605+
69606 for_each_populated_zone(zone) {
69607 show_node(zone);
69608 printk("%s per-cpu:\n", zone->name);
69609@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
69610 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
69611 }
69612 #else
69613-static void inline setup_usemap(struct pglist_data *pgdat,
69614+static inline void setup_usemap(struct pglist_data *pgdat,
69615 struct zone *zone, unsigned long zonesize) {}
69616 #endif /* CONFIG_SPARSEMEM */
69617
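Illustrative sketch, not part of the patch: the ordering idea behind the PAX_MEMORY_SANITIZE hunks above, with a static buffer standing in for a page. Because memory is scrubbed in the free path, the allocation path can skip the __GFP_ZERO memset without ever handing out stale data.

#include <assert.h>
#include <string.h>

static unsigned char page[4096];

/* done when the page is released, mirroring sanitize_highpage() */
static void sanitize_on_free(void)
{
	memset(page, 0, sizeof(page));
}

int main(void)
{
	memcpy(page, "secret", 6);	/* page used, then freed */
	sanitize_on_free();
	/* the next user of the page starts from cleared memory */
	assert(page[0] == 0 && page[5] == 0);
	return 0;
}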
69618diff -urNp linux-2.6.32.46/mm/percpu.c linux-2.6.32.46/mm/percpu.c
69619--- linux-2.6.32.46/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
69620+++ linux-2.6.32.46/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
69621@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
69622 static unsigned int pcpu_last_unit_cpu __read_mostly;
69623
69624 /* the address of the first chunk which starts with the kernel static area */
69625-void *pcpu_base_addr __read_mostly;
69626+void *pcpu_base_addr __read_only;
69627 EXPORT_SYMBOL_GPL(pcpu_base_addr);
69628
69629 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
69630diff -urNp linux-2.6.32.46/mm/rmap.c linux-2.6.32.46/mm/rmap.c
69631--- linux-2.6.32.46/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
69632+++ linux-2.6.32.46/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
69633@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
69634 /* page_table_lock to protect against threads */
69635 spin_lock(&mm->page_table_lock);
69636 if (likely(!vma->anon_vma)) {
69637+
69638+#ifdef CONFIG_PAX_SEGMEXEC
69639+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
69640+
69641+ if (vma_m) {
69642+ BUG_ON(vma_m->anon_vma);
69643+ vma_m->anon_vma = anon_vma;
69644+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
69645+ }
69646+#endif
69647+
69648 vma->anon_vma = anon_vma;
69649 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
69650 allocated = NULL;
69651diff -urNp linux-2.6.32.46/mm/shmem.c linux-2.6.32.46/mm/shmem.c
69652--- linux-2.6.32.46/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
69653+++ linux-2.6.32.46/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
69654@@ -31,7 +31,7 @@
69655 #include <linux/swap.h>
69656 #include <linux/ima.h>
69657
69658-static struct vfsmount *shm_mnt;
69659+struct vfsmount *shm_mnt;
69660
69661 #ifdef CONFIG_SHMEM
69662 /*
69663@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
69664 goto unlock;
69665 }
69666 entry = shmem_swp_entry(info, index, NULL);
69667+ if (!entry)
69668+ goto unlock;
69669 if (entry->val) {
69670 /*
69671 * The more uptodate page coming down from a stacked
69672@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
69673 struct vm_area_struct pvma;
69674 struct page *page;
69675
69676+ pax_track_stack();
69677+
69678 spol = mpol_cond_copy(&mpol,
69679 mpol_shared_policy_lookup(&info->policy, idx));
69680
69681@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
69682
69683 info = SHMEM_I(inode);
69684 inode->i_size = len-1;
69685- if (len <= (char *)inode - (char *)info) {
69686+ if (len <= (char *)inode - (char *)info && len <= 64) {
69687 /* do it inline */
69688 memcpy(info, symname, len);
69689 inode->i_op = &shmem_symlink_inline_operations;
69690@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
69691 int err = -ENOMEM;
69692
69693 /* Round up to L1_CACHE_BYTES to resist false sharing */
69694- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
69695- L1_CACHE_BYTES), GFP_KERNEL);
69696+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
69697 if (!sbinfo)
69698 return -ENOMEM;
69699
69700diff -urNp linux-2.6.32.46/mm/slab.c linux-2.6.32.46/mm/slab.c
69701--- linux-2.6.32.46/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
69702+++ linux-2.6.32.46/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
69703@@ -174,7 +174,7 @@
69704
69705 /* Legal flag mask for kmem_cache_create(). */
69706 #if DEBUG
69707-# define CREATE_MASK (SLAB_RED_ZONE | \
69708+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
69709 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
69710 SLAB_CACHE_DMA | \
69711 SLAB_STORE_USER | \
69712@@ -182,7 +182,7 @@
69713 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69714 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
69715 #else
69716-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
69717+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
69718 SLAB_CACHE_DMA | \
69719 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
69720 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
69721@@ -308,7 +308,7 @@ struct kmem_list3 {
69722 * Need this for bootstrapping a per node allocator.
69723 */
69724 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
69725-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
69726+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
69727 #define CACHE_CACHE 0
69728 #define SIZE_AC MAX_NUMNODES
69729 #define SIZE_L3 (2 * MAX_NUMNODES)
69730@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
69731 if ((x)->max_freeable < i) \
69732 (x)->max_freeable = i; \
69733 } while (0)
69734-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
69735-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
69736-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
69737-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
69738+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
69739+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
69740+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
69741+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
69742 #else
69743 #define STATS_INC_ACTIVE(x) do { } while (0)
69744 #define STATS_DEC_ACTIVE(x) do { } while (0)
69745@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
69746 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
69747 */
69748 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
69749- const struct slab *slab, void *obj)
69750+ const struct slab *slab, const void *obj)
69751 {
69752 u32 offset = (obj - slab->s_mem);
69753 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
69754@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
69755 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
69756 sizes[INDEX_AC].cs_size,
69757 ARCH_KMALLOC_MINALIGN,
69758- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69759+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69760 NULL);
69761
69762 if (INDEX_AC != INDEX_L3) {
69763@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
69764 kmem_cache_create(names[INDEX_L3].name,
69765 sizes[INDEX_L3].cs_size,
69766 ARCH_KMALLOC_MINALIGN,
69767- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69768+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69769 NULL);
69770 }
69771
69772@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
69773 sizes->cs_cachep = kmem_cache_create(names->name,
69774 sizes->cs_size,
69775 ARCH_KMALLOC_MINALIGN,
69776- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
69777+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
69778 NULL);
69779 }
69780 #ifdef CONFIG_ZONE_DMA
69781@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
69782 }
69783 /* cpu stats */
69784 {
69785- unsigned long allochit = atomic_read(&cachep->allochit);
69786- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
69787- unsigned long freehit = atomic_read(&cachep->freehit);
69788- unsigned long freemiss = atomic_read(&cachep->freemiss);
69789+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
69790+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
69791+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
69792+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
69793
69794 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
69795 allochit, allocmiss, freehit, freemiss);
69796@@ -4471,15 +4471,66 @@ static const struct file_operations proc
69797
69798 static int __init slab_proc_init(void)
69799 {
69800- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
69801+ mode_t gr_mode = S_IRUGO;
69802+
69803+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69804+ gr_mode = S_IRUSR;
69805+#endif
69806+
69807+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
69808 #ifdef CONFIG_DEBUG_SLAB_LEAK
69809- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
69810+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
69811 #endif
69812 return 0;
69813 }
69814 module_init(slab_proc_init);
69815 #endif
69816
69817+void check_object_size(const void *ptr, unsigned long n, bool to)
69818+{
69819+
69820+#ifdef CONFIG_PAX_USERCOPY
69821+ struct page *page;
69822+ struct kmem_cache *cachep = NULL;
69823+ struct slab *slabp;
69824+ unsigned int objnr;
69825+ unsigned long offset;
69826+
69827+ if (!n)
69828+ return;
69829+
69830+ if (ZERO_OR_NULL_PTR(ptr))
69831+ goto report;
69832+
69833+ if (!virt_addr_valid(ptr))
69834+ return;
69835+
69836+ page = virt_to_head_page(ptr);
69837+
69838+ if (!PageSlab(page)) {
69839+ if (object_is_on_stack(ptr, n) == -1)
69840+ goto report;
69841+ return;
69842+ }
69843+
69844+ cachep = page_get_cache(page);
69845+ if (!(cachep->flags & SLAB_USERCOPY))
69846+ goto report;
69847+
69848+ slabp = page_get_slab(page);
69849+ objnr = obj_to_index(cachep, slabp, ptr);
69850+ BUG_ON(objnr >= cachep->num);
69851+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
69852+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
69853+ return;
69854+
69855+report:
69856+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
69857+#endif
69858+
69859+}
69860+EXPORT_SYMBOL(check_object_size);
69861+
69862 /**
69863 * ksize - get the actual amount of memory allocated for a given object
69864 * @objp: Pointer to the object
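Illustrative sketch, not part of the patch: the bounds test at the heart of the new check_object_size(), reduced to pointer arithmetic. A copy of n bytes may only proceed if it stays inside the single slab object containing the start pointer; obj and obj_size stand in for the values the kernel derives from the slab metadata.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static bool usercopy_ok(const char *obj, size_t obj_size,
			const char *ptr, size_t n)
{
	size_t offset;

	if (ptr < obj)
		return false;
	offset = (size_t)(ptr - obj);
	return offset <= obj_size && n <= obj_size - offset;
}

int main(void)
{
	char obj[64];

	assert(usercopy_ok(obj, sizeof(obj), obj + 16, 48));	/* runs exactly to the end */
	assert(!usercopy_ok(obj, sizeof(obj), obj + 16, 49));	/* one byte past the object */
	return 0;
}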
69865diff -urNp linux-2.6.32.46/mm/slob.c linux-2.6.32.46/mm/slob.c
69866--- linux-2.6.32.46/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
69867+++ linux-2.6.32.46/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
69868@@ -29,7 +29,7 @@
69869 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
69870 * alloc_pages() directly, allocating compound pages so the page order
69871 * does not have to be separately tracked, and also stores the exact
69872- * allocation size in page->private so that it can be used to accurately
69873+ * allocation size in slob_page->size so that it can be used to accurately
69874 * provide ksize(). These objects are detected in kfree() because slob_page()
69875 * is false for them.
69876 *
69877@@ -58,6 +58,7 @@
69878 */
69879
69880 #include <linux/kernel.h>
69881+#include <linux/sched.h>
69882 #include <linux/slab.h>
69883 #include <linux/mm.h>
69884 #include <linux/swap.h> /* struct reclaim_state */
69885@@ -100,7 +101,8 @@ struct slob_page {
69886 unsigned long flags; /* mandatory */
69887 atomic_t _count; /* mandatory */
69888 slobidx_t units; /* free units left in page */
69889- unsigned long pad[2];
69890+ unsigned long pad[1];
69891+ unsigned long size; /* size when >=PAGE_SIZE */
69892 slob_t *free; /* first free slob_t in page */
69893 struct list_head list; /* linked list of free pages */
69894 };
69895@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
69896 */
69897 static inline int is_slob_page(struct slob_page *sp)
69898 {
69899- return PageSlab((struct page *)sp);
69900+ return PageSlab((struct page *)sp) && !sp->size;
69901 }
69902
69903 static inline void set_slob_page(struct slob_page *sp)
69904@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
69905
69906 static inline struct slob_page *slob_page(const void *addr)
69907 {
69908- return (struct slob_page *)virt_to_page(addr);
69909+ return (struct slob_page *)virt_to_head_page(addr);
69910 }
69911
69912 /*
69913@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
69914 /*
69915 * Return the size of a slob block.
69916 */
69917-static slobidx_t slob_units(slob_t *s)
69918+static slobidx_t slob_units(const slob_t *s)
69919 {
69920 if (s->units > 0)
69921 return s->units;
69922@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
69923 /*
69924 * Return the next free slob block pointer after this one.
69925 */
69926-static slob_t *slob_next(slob_t *s)
69927+static slob_t *slob_next(const slob_t *s)
69928 {
69929 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
69930 slobidx_t next;
69931@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
69932 /*
69933 * Returns true if s is the last free block in its page.
69934 */
69935-static int slob_last(slob_t *s)
69936+static int slob_last(const slob_t *s)
69937 {
69938 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
69939 }
69940@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
69941 if (!page)
69942 return NULL;
69943
69944+ set_slob_page(page);
69945 return page_address(page);
69946 }
69947
69948@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
69949 if (!b)
69950 return NULL;
69951 sp = slob_page(b);
69952- set_slob_page(sp);
69953
69954 spin_lock_irqsave(&slob_lock, flags);
69955 sp->units = SLOB_UNITS(PAGE_SIZE);
69956 sp->free = b;
69957+ sp->size = 0;
69958 INIT_LIST_HEAD(&sp->list);
69959 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
69960 set_slob_page_free(sp, slob_list);
69961@@ -475,10 +478,9 @@ out:
69962 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
69963 #endif
69964
69965-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
69966+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
69967 {
69968- unsigned int *m;
69969- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
69970+ slob_t *m;
69971 void *ret;
69972
69973 lockdep_trace_alloc(gfp);
69974@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
69975
69976 if (!m)
69977 return NULL;
69978- *m = size;
69979+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
69980+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
69981+ m[0].units = size;
69982+ m[1].units = align;
69983 ret = (void *)m + align;
69984
69985 trace_kmalloc_node(_RET_IP_, ret,
69986@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
69987
69988 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
69989 if (ret) {
69990- struct page *page;
69991- page = virt_to_page(ret);
69992- page->private = size;
69993+ struct slob_page *sp;
69994+ sp = slob_page(ret);
69995+ sp->size = size;
69996 }
69997
69998 trace_kmalloc_node(_RET_IP_, ret,
69999 size, PAGE_SIZE << order, gfp, node);
70000 }
70001
70002- kmemleak_alloc(ret, size, 1, gfp);
70003+ return ret;
70004+}
70005+
70006+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70007+{
70008+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70009+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70010+
70011+ if (!ZERO_OR_NULL_PTR(ret))
70012+ kmemleak_alloc(ret, size, 1, gfp);
70013 return ret;
70014 }
70015 EXPORT_SYMBOL(__kmalloc_node);
70016@@ -528,13 +542,88 @@ void kfree(const void *block)
70017 sp = slob_page(block);
70018 if (is_slob_page(sp)) {
70019 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70020- unsigned int *m = (unsigned int *)(block - align);
70021- slob_free(m, *m + align);
70022- } else
70023+ slob_t *m = (slob_t *)(block - align);
70024+ slob_free(m, m[0].units + align);
70025+ } else {
70026+ clear_slob_page(sp);
70027+ free_slob_page(sp);
70028+ sp->size = 0;
70029 put_page(&sp->page);
70030+ }
70031 }
70032 EXPORT_SYMBOL(kfree);
70033
70034+void check_object_size(const void *ptr, unsigned long n, bool to)
70035+{
70036+
70037+#ifdef CONFIG_PAX_USERCOPY
70038+ struct slob_page *sp;
70039+ const slob_t *free;
70040+ const void *base;
70041+ unsigned long flags;
70042+
70043+ if (!n)
70044+ return;
70045+
70046+ if (ZERO_OR_NULL_PTR(ptr))
70047+ goto report;
70048+
70049+ if (!virt_addr_valid(ptr))
70050+ return;
70051+
70052+ sp = slob_page(ptr);
70053+ if (!PageSlab((struct page*)sp)) {
70054+ if (object_is_on_stack(ptr, n) == -1)
70055+ goto report;
70056+ return;
70057+ }
70058+
70059+ if (sp->size) {
70060+ base = page_address(&sp->page);
70061+ if (base <= ptr && n <= sp->size - (ptr - base))
70062+ return;
70063+ goto report;
70064+ }
70065+
70066+ /* some tricky double walking to find the chunk */
70067+ spin_lock_irqsave(&slob_lock, flags);
70068+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70069+ free = sp->free;
70070+
70071+ while (!slob_last(free) && (void *)free <= ptr) {
70072+ base = free + slob_units(free);
70073+ free = slob_next(free);
70074+ }
70075+
70076+ while (base < (void *)free) {
70077+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70078+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70079+ int offset;
70080+
70081+ if (ptr < base + align)
70082+ break;
70083+
70084+ offset = ptr - base - align;
70085+ if (offset >= m) {
70086+ base += size;
70087+ continue;
70088+ }
70089+
70090+ if (n > m - offset)
70091+ break;
70092+
70093+ spin_unlock_irqrestore(&slob_lock, flags);
70094+ return;
70095+ }
70096+
70097+ spin_unlock_irqrestore(&slob_lock, flags);
70098+report:
70099+ pax_report_usercopy(ptr, n, to, NULL);
70100+#endif
70101+
70102+}
70103+EXPORT_SYMBOL(check_object_size);
70104+
70105 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70106 size_t ksize(const void *block)
70107 {
70108@@ -547,10 +636,10 @@ size_t ksize(const void *block)
70109 sp = slob_page(block);
70110 if (is_slob_page(sp)) {
70111 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70112- unsigned int *m = (unsigned int *)(block - align);
70113- return SLOB_UNITS(*m) * SLOB_UNIT;
70114+ slob_t *m = (slob_t *)(block - align);
70115+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70116 } else
70117- return sp->page.private;
70118+ return sp->size;
70119 }
70120 EXPORT_SYMBOL(ksize);
70121
70122@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70123 {
70124 struct kmem_cache *c;
70125
70126+#ifdef CONFIG_PAX_USERCOPY
70127+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70128+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70129+#else
70130 c = slob_alloc(sizeof(struct kmem_cache),
70131 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70132+#endif
70133
70134 if (c) {
70135 c->name = name;
70136@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70137 {
70138 void *b;
70139
70140+#ifdef CONFIG_PAX_USERCOPY
70141+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70142+#else
70143 if (c->size < PAGE_SIZE) {
70144 b = slob_alloc(c->size, flags, c->align, node);
70145 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70146 SLOB_UNITS(c->size) * SLOB_UNIT,
70147 flags, node);
70148 } else {
70149+ struct slob_page *sp;
70150+
70151 b = slob_new_pages(flags, get_order(c->size), node);
70152+ sp = slob_page(b);
70153+ sp->size = c->size;
70154 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70155 PAGE_SIZE << get_order(c->size),
70156 flags, node);
70157 }
70158+#endif
70159
70160 if (c->ctor)
70161 c->ctor(b);
70162@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70163
70164 static void __kmem_cache_free(void *b, int size)
70165 {
70166- if (size < PAGE_SIZE)
70167+ struct slob_page *sp = slob_page(b);
70168+
70169+ if (is_slob_page(sp))
70170 slob_free(b, size);
70171- else
70172+ else {
70173+ clear_slob_page(sp);
70174+ free_slob_page(sp);
70175+ sp->size = 0;
70176 slob_free_pages(b, get_order(size));
70177+ }
70178 }
70179
70180 static void kmem_rcu_free(struct rcu_head *head)
70181@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70182
70183 void kmem_cache_free(struct kmem_cache *c, void *b)
70184 {
70185+ int size = c->size;
70186+
70187+#ifdef CONFIG_PAX_USERCOPY
70188+ if (size + c->align < PAGE_SIZE) {
70189+ size += c->align;
70190+ b -= c->align;
70191+ }
70192+#endif
70193+
70194 kmemleak_free_recursive(b, c->flags);
70195 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70196 struct slob_rcu *slob_rcu;
70197- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70198+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70199 INIT_RCU_HEAD(&slob_rcu->head);
70200- slob_rcu->size = c->size;
70201+ slob_rcu->size = size;
70202 call_rcu(&slob_rcu->head, kmem_rcu_free);
70203 } else {
70204- __kmem_cache_free(b, c->size);
70205+ __kmem_cache_free(b, size);
70206 }
70207
70208+#ifdef CONFIG_PAX_USERCOPY
70209+ trace_kfree(_RET_IP_, b);
70210+#else
70211 trace_kmem_cache_free(_RET_IP_, b);
70212+#endif
70213+
70214 }
70215 EXPORT_SYMBOL(kmem_cache_free);
70216
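The slob.c hunks above replace the single unsigned-int size header in front of each kmalloc'd SLOB object with two slob_t entries, m[0].units holding the requested size and m[1].units the alignment, so that kfree(), ksize() and the new check_object_size() can recover the object's exact bounds. Below is a minimal user-space model of that header layout and the bounds test it enables; the names and the toy allocator are illustrative only, not the kernel's code.

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for the two-word SLOB header: units[0] = size, units[1] = align */
struct toy_hdr { size_t units[2]; };

#define TOY_ALIGN sizeof(struct toy_hdr)

static void *toy_kmalloc(size_t size)
{
	struct toy_hdr *h = malloc(TOY_ALIGN + size);
	if (!h)
		return NULL;
	h->units[0] = size;       /* requested object size       */
	h->units[1] = TOY_ALIGN;  /* distance back to the header */
	return (char *)h + TOY_ALIGN;
}

/* model of the usercopy test: a copy of n bytes at ptr must stay inside the object */
static int toy_copy_ok(const void *obj, const void *ptr, size_t n)
{
	const struct toy_hdr *h = (const struct toy_hdr *)((const char *)obj - TOY_ALIGN);
	size_t off = (size_t)((const char *)ptr - (const char *)obj);

	return off <= h->units[0] && n <= h->units[0] - off;
}

int main(void)
{
	char *obj = toy_kmalloc(64);

	if (!obj)
		return 1;
	printf("inside : %d\n", toy_copy_ok(obj, obj + 16, 32)); /* 1: fits         */
	printf("overrun: %d\n", toy_copy_ok(obj, obj + 48, 32)); /* 0: past the end */
	free(obj - TOY_ALIGN);
	return 0;
}

The stored alignment is what check_object_size()'s free-list walk above consumes when it needs to find where the user-visible object starts inside each allocated chunk.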
70217diff -urNp linux-2.6.32.46/mm/slub.c linux-2.6.32.46/mm/slub.c
70218--- linux-2.6.32.46/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70219+++ linux-2.6.32.46/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70220@@ -410,7 +410,7 @@ static void print_track(const char *s, s
70221 if (!t->addr)
70222 return;
70223
70224- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70225+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70226 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70227 }
70228
70229@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70230
70231 page = virt_to_head_page(x);
70232
70233+ BUG_ON(!PageSlab(page));
70234+
70235 slab_free(s, page, x, _RET_IP_);
70236
70237 trace_kmem_cache_free(_RET_IP_, x);
70238@@ -1937,7 +1939,7 @@ static int slub_min_objects;
70239 * Merge control. If this is set then no merging of slab caches will occur.
70240 * (Could be removed. This was introduced to pacify the merge skeptics.)
70241 */
70242-static int slub_nomerge;
70243+static int slub_nomerge = 1;
70244
70245 /*
70246 * Calculate the order of allocation given an slab object size.
70247@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70248 * list to avoid pounding the page allocator excessively.
70249 */
70250 set_min_partial(s, ilog2(s->size));
70251- s->refcount = 1;
70252+ atomic_set(&s->refcount, 1);
70253 #ifdef CONFIG_NUMA
70254 s->remote_node_defrag_ratio = 1000;
70255 #endif
70256@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70257 void kmem_cache_destroy(struct kmem_cache *s)
70258 {
70259 down_write(&slub_lock);
70260- s->refcount--;
70261- if (!s->refcount) {
70262+ if (atomic_dec_and_test(&s->refcount)) {
70263 list_del(&s->list);
70264 up_write(&slub_lock);
70265 if (kmem_cache_close(s)) {
70266@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70267 __setup("slub_nomerge", setup_slub_nomerge);
70268
70269 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70270- const char *name, int size, gfp_t gfp_flags)
70271+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70272 {
70273- unsigned int flags = 0;
70274-
70275 if (gfp_flags & SLUB_DMA)
70276- flags = SLAB_CACHE_DMA;
70277+ flags |= SLAB_CACHE_DMA;
70278
70279 /*
70280 * This function is called with IRQs disabled during early-boot on
70281@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70282 EXPORT_SYMBOL(__kmalloc_node);
70283 #endif
70284
70285+void check_object_size(const void *ptr, unsigned long n, bool to)
70286+{
70287+
70288+#ifdef CONFIG_PAX_USERCOPY
70289+ struct page *page;
70290+ struct kmem_cache *s = NULL;
70291+ unsigned long offset;
70292+
70293+ if (!n)
70294+ return;
70295+
70296+ if (ZERO_OR_NULL_PTR(ptr))
70297+ goto report;
70298+
70299+ if (!virt_addr_valid(ptr))
70300+ return;
70301+
70302+ page = get_object_page(ptr);
70303+
70304+ if (!page) {
70305+ if (object_is_on_stack(ptr, n) == -1)
70306+ goto report;
70307+ return;
70308+ }
70309+
70310+ s = page->slab;
70311+ if (!(s->flags & SLAB_USERCOPY))
70312+ goto report;
70313+
70314+ offset = (ptr - page_address(page)) % s->size;
70315+ if (offset <= s->objsize && n <= s->objsize - offset)
70316+ return;
70317+
70318+report:
70319+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70320+#endif
70321+
70322+}
70323+EXPORT_SYMBOL(check_object_size);
70324+
70325 size_t ksize(const void *object)
70326 {
70327 struct page *page;
70328@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70329 * kmem_cache_open for slab_state == DOWN.
70330 */
70331 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70332- sizeof(struct kmem_cache_node), GFP_NOWAIT);
70333- kmalloc_caches[0].refcount = -1;
70334+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70335+ atomic_set(&kmalloc_caches[0].refcount, -1);
70336 caches++;
70337
70338 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70339@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70340 /* Caches that are not of the two-to-the-power-of size */
70341 if (KMALLOC_MIN_SIZE <= 32) {
70342 create_kmalloc_cache(&kmalloc_caches[1],
70343- "kmalloc-96", 96, GFP_NOWAIT);
70344+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70345 caches++;
70346 }
70347 if (KMALLOC_MIN_SIZE <= 64) {
70348 create_kmalloc_cache(&kmalloc_caches[2],
70349- "kmalloc-192", 192, GFP_NOWAIT);
70350+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70351 caches++;
70352 }
70353
70354 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70355 create_kmalloc_cache(&kmalloc_caches[i],
70356- "kmalloc", 1 << i, GFP_NOWAIT);
70357+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70358 caches++;
70359 }
70360
70361@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70362 /*
70363 * We may have set a slab to be unmergeable during bootstrap.
70364 */
70365- if (s->refcount < 0)
70366+ if (atomic_read(&s->refcount) < 0)
70367 return 1;
70368
70369 return 0;
70370@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70371 if (s) {
70372 int cpu;
70373
70374- s->refcount++;
70375+ atomic_inc(&s->refcount);
70376 /*
70377 * Adjust the object sizes so that we clear
70378 * the complete object on kzalloc.
70379@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70380
70381 if (sysfs_slab_alias(s, name)) {
70382 down_write(&slub_lock);
70383- s->refcount--;
70384+ atomic_dec(&s->refcount);
70385 up_write(&slub_lock);
70386 goto err;
70387 }
70388@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70389
70390 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70391 {
70392- return sprintf(buf, "%d\n", s->refcount - 1);
70393+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70394 }
70395 SLAB_ATTR_RO(aliases);
70396
70397@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70398 kfree(s);
70399 }
70400
70401-static struct sysfs_ops slab_sysfs_ops = {
70402+static const struct sysfs_ops slab_sysfs_ops = {
70403 .show = slab_attr_show,
70404 .store = slab_attr_store,
70405 };
70406@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70407 return 0;
70408 }
70409
70410-static struct kset_uevent_ops slab_uevent_ops = {
70411+static const struct kset_uevent_ops slab_uevent_ops = {
70412 .filter = uevent_filter,
70413 };
70414
70415@@ -4785,7 +4824,13 @@ static const struct file_operations proc
70416
70417 static int __init slab_proc_init(void)
70418 {
70419- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70420+ mode_t gr_mode = S_IRUGO;
70421+
70422+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70423+ gr_mode = S_IRUSR;
70424+#endif
70425+
70426+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70427 return 0;
70428 }
70429 module_init(slab_proc_init);
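For SLUB the new check_object_size() above takes a different route: it looks up the slab page for the pointer, requires the owning cache to carry SLAB_USERCOPY (the flag the kmalloc caches are now created with), and then checks that the copy is confined to a single object via offset = (ptr - page_address(page)) % s->size. A small stand-alone model of that modulo test follows; the cache geometry is invented for illustration.

#include <stdio.h>

/* toy cache geometry: objects of obj_size bytes packed every stride bytes */
struct toy_cache { size_t stride; size_t obj_size; };

/* 1 if copying n bytes starting addr_off bytes into the slab page stays
 * inside one object of the cache, 0 otherwise (mirrors the hunk's test) */
static int copy_within_object(const struct toy_cache *c, size_t addr_off, size_t n)
{
	size_t off = addr_off % c->stride;   /* offset inside the object slot */

	return off <= c->obj_size && n <= c->obj_size - off;
}

int main(void)
{
	struct toy_cache kmalloc_192 = { .stride = 192, .obj_size = 192 };

	printf("%d\n", copy_within_object(&kmalloc_192, 3 * 192 + 10, 100)); /* 1                        */
	printf("%d\n", copy_within_object(&kmalloc_192, 3 * 192 + 10, 200)); /* 0: spills into next slot */
	return 0;
}

The remaining slub.c hunks are separate hardenings visible above: the cache refcount becomes an atomic_t, slab merging is disabled by default (slub_nomerge = 1), and /proc/slabinfo can be restricted to root under GRKERNSEC_PROC_ADD.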
70430diff -urNp linux-2.6.32.46/mm/swap.c linux-2.6.32.46/mm/swap.c
70431--- linux-2.6.32.46/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70432+++ linux-2.6.32.46/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70433@@ -30,6 +30,7 @@
70434 #include <linux/notifier.h>
70435 #include <linux/backing-dev.h>
70436 #include <linux/memcontrol.h>
70437+#include <linux/hugetlb.h>
70438
70439 #include "internal.h"
70440
70441@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70442 compound_page_dtor *dtor;
70443
70444 dtor = get_compound_page_dtor(page);
70445+ if (!PageHuge(page))
70446+ BUG_ON(dtor != free_compound_page);
70447 (*dtor)(page);
70448 }
70449 }
70450diff -urNp linux-2.6.32.46/mm/util.c linux-2.6.32.46/mm/util.c
70451--- linux-2.6.32.46/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70452+++ linux-2.6.32.46/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70453@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70454 void arch_pick_mmap_layout(struct mm_struct *mm)
70455 {
70456 mm->mmap_base = TASK_UNMAPPED_BASE;
70457+
70458+#ifdef CONFIG_PAX_RANDMMAP
70459+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70460+ mm->mmap_base += mm->delta_mmap;
70461+#endif
70462+
70463 mm->get_unmapped_area = arch_get_unmapped_area;
70464 mm->unmap_area = arch_unmap_area;
70465 }
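The util.c hunk makes arch_pick_mmap_layout() add a random delta (mm->delta_mmap) to TASK_UNMAPPED_BASE when the task runs with MF_PAX_RANDMMAP, so the mmap region starts at a different address from one execution to the next. A rough user-space illustration of deriving and applying such a page-aligned delta; the base address and the entropy width are made-up values, not the kernel's.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT          12
#define TASK_UNMAPPED_BASE  0x40000000UL   /* illustrative layout only */

/* pick a page-aligned random offset with `bits` bits of entropy */
static unsigned long random_mmap_delta(unsigned int bits)
{
	unsigned long pages = (unsigned long)rand() & ((1UL << bits) - 1);

	return pages << PAGE_SHIFT;
}

int main(void)
{
	srand((unsigned)time(NULL));
	for (int i = 0; i < 3; i++) {
		unsigned long mmap_base = TASK_UNMAPPED_BASE + random_mmap_delta(16);

		printf("mmap_base = %#lx\n", mmap_base);   /* differs per run */
	}
	return 0;
}

In the patch the delta lives in the mm itself (mm->delta_mmap), so every later arch_get_unmapped_area() search starts from the shifted base.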
70466diff -urNp linux-2.6.32.46/mm/vmalloc.c linux-2.6.32.46/mm/vmalloc.c
70467--- linux-2.6.32.46/mm/vmalloc.c 2011-08-29 22:24:44.000000000 -0400
70468+++ linux-2.6.32.46/mm/vmalloc.c 2011-08-29 22:25:07.000000000 -0400
70469@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70470
70471 pte = pte_offset_kernel(pmd, addr);
70472 do {
70473- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70474- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70475+
70476+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70477+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70478+ BUG_ON(!pte_exec(*pte));
70479+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70480+ continue;
70481+ }
70482+#endif
70483+
70484+ {
70485+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70486+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70487+ }
70488 } while (pte++, addr += PAGE_SIZE, addr != end);
70489 }
70490
70491@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70492 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70493 {
70494 pte_t *pte;
70495+ int ret = -ENOMEM;
70496
70497 /*
70498 * nr is a running index into the array which helps higher level
70499@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70500 pte = pte_alloc_kernel(pmd, addr);
70501 if (!pte)
70502 return -ENOMEM;
70503+
70504+ pax_open_kernel();
70505 do {
70506 struct page *page = pages[*nr];
70507
70508- if (WARN_ON(!pte_none(*pte)))
70509- return -EBUSY;
70510- if (WARN_ON(!page))
70511- return -ENOMEM;
70512+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70513+ if (!(pgprot_val(prot) & _PAGE_NX))
70514+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70515+ else
70516+#endif
70517+
70518+ if (WARN_ON(!pte_none(*pte))) {
70519+ ret = -EBUSY;
70520+ goto out;
70521+ }
70522+ if (WARN_ON(!page)) {
70523+ ret = -ENOMEM;
70524+ goto out;
70525+ }
70526 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70527 (*nr)++;
70528 } while (pte++, addr += PAGE_SIZE, addr != end);
70529- return 0;
70530+ ret = 0;
70531+out:
70532+ pax_close_kernel();
70533+ return ret;
70534 }
70535
70536 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70537@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70538 * and fall back on vmalloc() if that fails. Others
70539 * just put it in the vmalloc space.
70540 */
70541-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70542+#ifdef CONFIG_MODULES
70543+#ifdef MODULES_VADDR
70544 unsigned long addr = (unsigned long)x;
70545 if (addr >= MODULES_VADDR && addr < MODULES_END)
70546 return 1;
70547 #endif
70548+
70549+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70550+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70551+ return 1;
70552+#endif
70553+
70554+#endif
70555+
70556 return is_vmalloc_addr(x);
70557 }
70558
70559@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70560
70561 if (!pgd_none(*pgd)) {
70562 pud_t *pud = pud_offset(pgd, addr);
70563+#ifdef CONFIG_X86
70564+ if (!pud_large(*pud))
70565+#endif
70566 if (!pud_none(*pud)) {
70567 pmd_t *pmd = pmd_offset(pud, addr);
70568+#ifdef CONFIG_X86
70569+ if (!pmd_large(*pmd))
70570+#endif
70571 if (!pmd_none(*pmd)) {
70572 pte_t *ptep, pte;
70573
70574@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70575 struct rb_node *tmp;
70576
70577 while (*p) {
70578- struct vmap_area *tmp;
70579+ struct vmap_area *varea;
70580
70581 parent = *p;
70582- tmp = rb_entry(parent, struct vmap_area, rb_node);
70583- if (va->va_start < tmp->va_end)
70584+ varea = rb_entry(parent, struct vmap_area, rb_node);
70585+ if (va->va_start < varea->va_end)
70586 p = &(*p)->rb_left;
70587- else if (va->va_end > tmp->va_start)
70588+ else if (va->va_end > varea->va_start)
70589 p = &(*p)->rb_right;
70590 else
70591 BUG();
70592@@ -1233,6 +1275,16 @@ static struct vm_struct *__get_vm_area_n
70593 struct vm_struct *area;
70594
70595 BUG_ON(in_interrupt());
70596+
70597+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70598+ if (flags & VM_KERNEXEC) {
70599+ if (start != VMALLOC_START || end != VMALLOC_END)
70600+ return NULL;
70601+ start = (unsigned long)MODULES_EXEC_VADDR;
70602+ end = (unsigned long)MODULES_EXEC_END;
70603+ }
70604+#endif
70605+
70606 if (flags & VM_IOREMAP) {
70607 int bit = fls(size);
70608
70609@@ -1458,6 +1510,11 @@ void *vmap(struct page **pages, unsigned
70610 if (count > totalram_pages)
70611 return NULL;
70612
70613+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70614+ if (!(pgprot_val(prot) & _PAGE_NX))
70615+ flags |= VM_KERNEXEC;
70616+#endif
70617+
70618 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
70619 __builtin_return_address(0));
70620 if (!area)
70621@@ -1568,6 +1625,13 @@ static void *__vmalloc_node(unsigned lon
70622 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
70623 return NULL;
70624
70625+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70626+ if (!(pgprot_val(prot) & _PAGE_NX))
70627+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
70628+ node, gfp_mask, caller);
70629+ else
70630+#endif
70631+
70632 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
70633 VMALLOC_END, node, gfp_mask, caller);
70634
70635@@ -1586,6 +1650,7 @@ static void *__vmalloc_node(unsigned lon
70636 return addr;
70637 }
70638
70639+#undef __vmalloc
70640 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
70641 {
70642 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
70643@@ -1602,6 +1667,7 @@ EXPORT_SYMBOL(__vmalloc);
70644 * For tight control over page level allocator and protection flags
70645 * use __vmalloc() instead.
70646 */
70647+#undef vmalloc
70648 void *vmalloc(unsigned long size)
70649 {
70650 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70651@@ -1616,6 +1682,7 @@ EXPORT_SYMBOL(vmalloc);
70652 * The resulting memory area is zeroed so it can be mapped to userspace
70653 * without leaking data.
70654 */
70655+#undef vmalloc_user
70656 void *vmalloc_user(unsigned long size)
70657 {
70658 struct vm_struct *area;
70659@@ -1643,6 +1710,7 @@ EXPORT_SYMBOL(vmalloc_user);
70660 * For tight control over page level allocator and protection flags
70661 * use __vmalloc() instead.
70662 */
70663+#undef vmalloc_node
70664 void *vmalloc_node(unsigned long size, int node)
70665 {
70666 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
70667@@ -1665,10 +1733,10 @@ EXPORT_SYMBOL(vmalloc_node);
70668 * For tight control over page level allocator and protection flags
70669 * use __vmalloc() instead.
70670 */
70671-
70672+#undef vmalloc_exec
70673 void *vmalloc_exec(unsigned long size)
70674 {
70675- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
70676+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
70677 -1, __builtin_return_address(0));
70678 }
70679
70680@@ -1687,6 +1755,7 @@ void *vmalloc_exec(unsigned long size)
70681 * Allocate enough 32bit PA addressable pages to cover @size from the
70682 * page level allocator and map them into contiguous kernel virtual space.
70683 */
70684+#undef vmalloc_32
70685 void *vmalloc_32(unsigned long size)
70686 {
70687 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
70688@@ -1701,6 +1770,7 @@ EXPORT_SYMBOL(vmalloc_32);
70689 * The resulting memory area is 32bit addressable and zeroed so it can be
70690 * mapped to userspace without leaking data.
70691 */
70692+#undef vmalloc_32_user
70693 void *vmalloc_32_user(unsigned long size)
70694 {
70695 struct vm_struct *area;
70696@@ -1965,6 +2035,8 @@ int remap_vmalloc_range(struct vm_area_s
70697 unsigned long uaddr = vma->vm_start;
70698 unsigned long usize = vma->vm_end - vma->vm_start;
70699
70700+ BUG_ON(vma->vm_mirror);
70701+
70702 if ((PAGE_SIZE-1) & (unsigned long)addr)
70703 return -EINVAL;
70704
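The vmalloc.c hunks wire CONFIG_PAX_KERNEXEC into the vmalloc paths: requests whose protection lacks _PAGE_NX are tagged VM_KERNEXEC and redirected from the generic vmalloc arena into the module address range, the pte setup runs between pax_open_kernel()/pax_close_kernel(), and vmalloc_exec() now zeroes its pages. The address-range decision itself is simple; here is a condensed, self-contained sketch of it with placeholder addresses (the real MODULES_EXEC_VADDR/END and VMALLOC_START/END come from the architecture headers).

#include <stdio.h>

#define PROT_NX              0x1UL          /* stand-in for _PAGE_NX        */
#define VMALLOC_START_X      0xc9000000UL   /* all four values illustrative */
#define VMALLOC_END_X        0xf7000000UL
#define MODULES_EXEC_VADDR_X 0xf8000000UL
#define MODULES_EXEC_END_X   0xff000000UL

struct range { unsigned long start, end; };

/* executable requests are confined to the module area; everything else
 * stays in the ordinary vmalloc arena */
static struct range pick_vmalloc_range(unsigned long prot)
{
	if (!(prot & PROT_NX))
		return (struct range){ MODULES_EXEC_VADDR_X, MODULES_EXEC_END_X };
	return (struct range){ VMALLOC_START_X, VMALLOC_END_X };
}

int main(void)
{
	struct range data = pick_vmalloc_range(PROT_NX);  /* non-executable mapping */
	struct range exec = pick_vmalloc_range(0);        /* executable mapping     */

	printf("data: %#lx-%#lx\n", data.start, data.end);
	printf("exec: %#lx-%#lx\n", exec.start, exec.end);
	return 0;
}

Keeping all executable vmalloc memory inside the module range is what lets KERNEXEC keep the ordinary vmalloc arena non-executable.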
70705diff -urNp linux-2.6.32.46/mm/vmstat.c linux-2.6.32.46/mm/vmstat.c
70706--- linux-2.6.32.46/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
70707+++ linux-2.6.32.46/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
70708@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
70709 *
70710 * vm_stat contains the global counters
70711 */
70712-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70713+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70714 EXPORT_SYMBOL(vm_stat);
70715
70716 #ifdef CONFIG_SMP
70717@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
70718 v = p->vm_stat_diff[i];
70719 p->vm_stat_diff[i] = 0;
70720 local_irq_restore(flags);
70721- atomic_long_add(v, &zone->vm_stat[i]);
70722+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
70723 global_diff[i] += v;
70724 #ifdef CONFIG_NUMA
70725 /* 3 seconds idle till flush */
70726@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
70727
70728 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
70729 if (global_diff[i])
70730- atomic_long_add(global_diff[i], &vm_stat[i]);
70731+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
70732 }
70733
70734 #endif
70735@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
70736 start_cpu_timer(cpu);
70737 #endif
70738 #ifdef CONFIG_PROC_FS
70739- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
70740- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
70741- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
70742- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
70743+ {
70744+ mode_t gr_mode = S_IRUGO;
70745+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70746+ gr_mode = S_IRUSR;
70747+#endif
70748+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
70749+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
70750+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
70751+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
70752+#else
70753+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
70754+#endif
70755+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
70756+ }
70757 #endif
70758 return 0;
70759 }
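Both the slub.c and vmstat.c hunks replace the fixed S_IRUGO mode on several /proc entries with a gr_mode that collapses to S_IRUSR when GRKERNSEC_PROC_ADD is enabled, with vmstat optionally keeping group read under GRKERNSEC_PROC_USERGROUP. The resulting permission bits, shown with stand-in variables for the two Kconfig switches:

#include <stdio.h>

#define S_IRUSR 0400
#define S_IRGRP 0040
#define S_IROTH 0004
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)

int main(void)
{
	int grkernsec_proc_add = 1;   /* stand-ins for the Kconfig options */
	int proc_usergroup = 1;

	unsigned int gr_mode = grkernsec_proc_add ? S_IRUSR : S_IRUGO;
	unsigned int vmstat_mode = proc_usergroup ? (gr_mode | S_IRGRP) : gr_mode;

	printf("slabinfo/buddyinfo/zoneinfo: %04o\n", gr_mode);     /* 0400 when restricted */
	printf("vmstat                     : %04o\n", vmstat_mode); /* 0440 with proc group */
	return 0;
}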
70760diff -urNp linux-2.6.32.46/net/8021q/vlan.c linux-2.6.32.46/net/8021q/vlan.c
70761--- linux-2.6.32.46/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
70762+++ linux-2.6.32.46/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
70763@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
70764 err = -EPERM;
70765 if (!capable(CAP_NET_ADMIN))
70766 break;
70767- if ((args.u.name_type >= 0) &&
70768- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
70769+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
70770 struct vlan_net *vn;
70771
70772 vn = net_generic(net, vlan_net_id);
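The vlan.c change drops the args.u.name_type >= 0 half of the range check: the field is unsigned, so that comparison can never be false and only the upper bound against VLAN_NAME_TYPE_HIGHEST carries any information. A short demonstration of the tautology:

#include <stdio.h>

int main(void)
{
	unsigned int name_type = 0;   /* mirrors the unsigned ioctl field */

	/* for unsigned types "x >= 0" is always true, which is why only the
	 * upper-bound comparison survives in the patched code */
	printf("%d\n", name_type >= 0);          /* 1       */
	printf("%d\n", (unsigned int)-1 >= 0);   /* still 1 */
	return 0;
}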
70773diff -urNp linux-2.6.32.46/net/atm/atm_misc.c linux-2.6.32.46/net/atm/atm_misc.c
70774--- linux-2.6.32.46/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
70775+++ linux-2.6.32.46/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
70776@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
70777 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
70778 return 1;
70779 atm_return(vcc,truesize);
70780- atomic_inc(&vcc->stats->rx_drop);
70781+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70782 return 0;
70783 }
70784
70785@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
70786 }
70787 }
70788 atm_return(vcc,guess);
70789- atomic_inc(&vcc->stats->rx_drop);
70790+ atomic_inc_unchecked(&vcc->stats->rx_drop);
70791 return NULL;
70792 }
70793
70794@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
70795
70796 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70797 {
70798-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70799+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70800 __SONET_ITEMS
70801 #undef __HANDLE_ITEM
70802 }
70803@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
70804
70805 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
70806 {
70807-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
70808+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
70809 __SONET_ITEMS
70810 #undef __HANDLE_ITEM
70811 }
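The atm_misc.c hunks, like many of the networking hunks that follow, move pure statistics counters (tx, rx, rx_drop and friends) from atomic_t to atomic_unchecked_t and use the matching *_unchecked accessors. The point of the split is that counters which are allowed to wrap should not be subject to the overflow detection applied to real reference counts elsewhere in the patch. A minimal model of the two counter flavours; the types and the trap are illustrative, not the kernel's implementation.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* checked counter: overflow is treated as a bug (reference counts) */
typedef struct { int v; } atomic_model_t;
/* unchecked counter: wrapping is acceptable (pure statistics)      */
typedef struct { unsigned int v; } atomic_unchecked_model_t;

static void atomic_model_inc(atomic_model_t *a)
{
	assert(a->v != INT_MAX);   /* stand-in for the overflow trap */
	a->v++;
}

static void atomic_unchecked_model_inc(atomic_unchecked_model_t *a)
{
	a->v++;                    /* free to wrap; nothing derives a lifetime from it */
}

int main(void)
{
	atomic_unchecked_model_t rx_drop = { UINT_MAX };
	atomic_model_t refcnt = { 1 };

	atomic_unchecked_model_inc(&rx_drop);          /* harmless for a statistic */
	printf("rx_drop wrapped to %u\n", rx_drop.v);

	atomic_model_inc(&refcnt);                     /* would trap near overflow */
	printf("refcnt = %d\n", refcnt.v);
	return 0;
}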
70812diff -urNp linux-2.6.32.46/net/atm/lec.h linux-2.6.32.46/net/atm/lec.h
70813--- linux-2.6.32.46/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
70814+++ linux-2.6.32.46/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
70815@@ -48,7 +48,7 @@ struct lane2_ops {
70816 const u8 *tlvs, u32 sizeoftlvs);
70817 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
70818 const u8 *tlvs, u32 sizeoftlvs);
70819-};
70820+} __no_const;
70821
70822 /*
70823 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
70824diff -urNp linux-2.6.32.46/net/atm/mpc.h linux-2.6.32.46/net/atm/mpc.h
70825--- linux-2.6.32.46/net/atm/mpc.h 2011-03-27 14:31:47.000000000 -0400
70826+++ linux-2.6.32.46/net/atm/mpc.h 2011-08-23 21:22:38.000000000 -0400
70827@@ -33,7 +33,7 @@ struct mpoa_client {
70828 struct mpc_parameters parameters; /* parameters for this client */
70829
70830 const struct net_device_ops *old_ops;
70831- struct net_device_ops new_ops;
70832+ net_device_ops_no_const new_ops;
70833 };
70834
70835
70836diff -urNp linux-2.6.32.46/net/atm/mpoa_caches.c linux-2.6.32.46/net/atm/mpoa_caches.c
70837--- linux-2.6.32.46/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
70838+++ linux-2.6.32.46/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
70839@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
70840 struct timeval now;
70841 struct k_message msg;
70842
70843+ pax_track_stack();
70844+
70845 do_gettimeofday(&now);
70846
70847 write_lock_irq(&client->egress_lock);
70848diff -urNp linux-2.6.32.46/net/atm/proc.c linux-2.6.32.46/net/atm/proc.c
70849--- linux-2.6.32.46/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
70850+++ linux-2.6.32.46/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
70851@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
70852 const struct k_atm_aal_stats *stats)
70853 {
70854 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
70855- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
70856- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
70857- atomic_read(&stats->rx_drop));
70858+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
70859+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
70860+ atomic_read_unchecked(&stats->rx_drop));
70861 }
70862
70863 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
70864@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
70865 {
70866 struct sock *sk = sk_atm(vcc);
70867
70868+#ifdef CONFIG_GRKERNSEC_HIDESYM
70869+ seq_printf(seq, "%p ", NULL);
70870+#else
70871 seq_printf(seq, "%p ", vcc);
70872+#endif
70873+
70874 if (!vcc->dev)
70875 seq_printf(seq, "Unassigned ");
70876 else
70877@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
70878 {
70879 if (!vcc->dev)
70880 seq_printf(seq, sizeof(void *) == 4 ?
70881+#ifdef CONFIG_GRKERNSEC_HIDESYM
70882+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
70883+#else
70884 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
70885+#endif
70886 else
70887 seq_printf(seq, "%3d %3d %5d ",
70888 vcc->dev->number, vcc->vpi, vcc->vci);
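The atm/proc.c hunks (and the later can/bcm.c, inet_diag.c, raw.c and tcp_ipv4.c ones) implement GRKERNSEC_HIDESYM for these files by printing NULL where a kernel object pointer would otherwise appear in a /proc line, so readers cannot harvest kernel addresses from the output. The pattern reduces to a conditional on the config option; a toy version with a runtime flag standing in for the Kconfig choice:

#include <stdio.h>

static int hidesym = 1;   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

/* what a /proc line should show for a kernel object pointer */
static const void *displayed_ptr(const void *real)
{
	return hidesym ? NULL : real;
}

int main(void)
{
	int object;

	printf("socket %p\n", (void *)displayed_ptr(&object));  /* (nil) when hidden */
	hidesym = 0;
	printf("socket %p\n", (void *)displayed_ptr(&object));  /* real address      */
	return 0;
}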
70889diff -urNp linux-2.6.32.46/net/atm/resources.c linux-2.6.32.46/net/atm/resources.c
70890--- linux-2.6.32.46/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
70891+++ linux-2.6.32.46/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
70892@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
70893 static void copy_aal_stats(struct k_atm_aal_stats *from,
70894 struct atm_aal_stats *to)
70895 {
70896-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
70897+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
70898 __AAL_STAT_ITEMS
70899 #undef __HANDLE_ITEM
70900 }
70901@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
70902 static void subtract_aal_stats(struct k_atm_aal_stats *from,
70903 struct atm_aal_stats *to)
70904 {
70905-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
70906+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
70907 __AAL_STAT_ITEMS
70908 #undef __HANDLE_ITEM
70909 }
70910diff -urNp linux-2.6.32.46/net/bluetooth/l2cap.c linux-2.6.32.46/net/bluetooth/l2cap.c
70911--- linux-2.6.32.46/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
70912+++ linux-2.6.32.46/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
70913@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
70914 err = -ENOTCONN;
70915 break;
70916 }
70917-
70918+ memset(&cinfo, 0, sizeof(cinfo));
70919 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
70920 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
70921
70922@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
70923
70924 /* Reject if config buffer is too small. */
70925 len = cmd_len - sizeof(*req);
70926- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70927+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
70928 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
70929 l2cap_build_conf_rsp(sk, rsp,
70930 L2CAP_CONF_REJECT, flags), rsp);
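The first l2cap.c hunk zeroes the whole l2cap_conninfo structure before filling it for getsockopt, because structure padding and any members left unwritten would otherwise reach user space with stale kernel stack bytes; the second hunk rejects negative lengths in the config request before they are added to conf_len. A compact user-space model of the padding-leak problem and the memset fix, with an invented struct layout:

#include <stdio.h>
#include <string.h>

struct conninfo {
	unsigned short handle;   /* 2 bytes, followed by 2 bytes of padding */
	unsigned int   flags;    /* 4 bytes                                 */
};

static void fill_leaky(struct conninfo *ci)
{
	ci->handle = 0x1234;     /* padding keeps whatever was on the stack */
	ci->flags = 0;
}

static void fill_zeroed(struct conninfo *ci)
{
	memset(ci, 0, sizeof(*ci));   /* the fix: every byte is initialised */
	ci->handle = 0x1234;
	ci->flags = 0;
}

static void dump(const char *tag, const struct conninfo *ci)
{
	const unsigned char *p = (const unsigned char *)ci;

	printf("%s:", tag);
	for (size_t i = 0; i < sizeof(*ci); i++)
		printf(" %02x", p[i]);
	printf("\n");
}

int main(void)
{
	struct conninfo ci;

	fill_leaky(&ci);
	dump("leaky ", &ci);   /* padding bytes are indeterminate */
	fill_zeroed(&ci);
	dump("zeroed", &ci);   /* padding bytes are now 00        */
	return 0;
}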
70931diff -urNp linux-2.6.32.46/net/bluetooth/rfcomm/sock.c linux-2.6.32.46/net/bluetooth/rfcomm/sock.c
70932--- linux-2.6.32.46/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
70933+++ linux-2.6.32.46/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
70934@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
70935
70936 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
70937
70938+ memset(&cinfo, 0, sizeof(cinfo));
70939 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
70940 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
70941
70942diff -urNp linux-2.6.32.46/net/bridge/br_private.h linux-2.6.32.46/net/bridge/br_private.h
70943--- linux-2.6.32.46/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
70944+++ linux-2.6.32.46/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
70945@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
70946
70947 #ifdef CONFIG_SYSFS
70948 /* br_sysfs_if.c */
70949-extern struct sysfs_ops brport_sysfs_ops;
70950+extern const struct sysfs_ops brport_sysfs_ops;
70951 extern int br_sysfs_addif(struct net_bridge_port *p);
70952
70953 /* br_sysfs_br.c */
70954diff -urNp linux-2.6.32.46/net/bridge/br_stp_if.c linux-2.6.32.46/net/bridge/br_stp_if.c
70955--- linux-2.6.32.46/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
70956+++ linux-2.6.32.46/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
70957@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
70958 char *envp[] = { NULL };
70959
70960 if (br->stp_enabled == BR_USER_STP) {
70961- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
70962+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
70963 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
70964 br->dev->name, r);
70965
70966diff -urNp linux-2.6.32.46/net/bridge/br_sysfs_if.c linux-2.6.32.46/net/bridge/br_sysfs_if.c
70967--- linux-2.6.32.46/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
70968+++ linux-2.6.32.46/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
70969@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
70970 return ret;
70971 }
70972
70973-struct sysfs_ops brport_sysfs_ops = {
70974+const struct sysfs_ops brport_sysfs_ops = {
70975 .show = brport_show,
70976 .store = brport_store,
70977 };
70978diff -urNp linux-2.6.32.46/net/bridge/netfilter/ebtables.c linux-2.6.32.46/net/bridge/netfilter/ebtables.c
70979--- linux-2.6.32.46/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
70980+++ linux-2.6.32.46/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
70981@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
70982 unsigned int entries_size, nentries;
70983 char *entries;
70984
70985+ pax_track_stack();
70986+
70987 if (cmd == EBT_SO_GET_ENTRIES) {
70988 entries_size = t->private->entries_size;
70989 nentries = t->private->nentries;
70990diff -urNp linux-2.6.32.46/net/can/bcm.c linux-2.6.32.46/net/can/bcm.c
70991--- linux-2.6.32.46/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
70992+++ linux-2.6.32.46/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
70993@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
70994 struct bcm_sock *bo = bcm_sk(sk);
70995 struct bcm_op *op;
70996
70997+#ifdef CONFIG_GRKERNSEC_HIDESYM
70998+ seq_printf(m, ">>> socket %p", NULL);
70999+ seq_printf(m, " / sk %p", NULL);
71000+ seq_printf(m, " / bo %p", NULL);
71001+#else
71002 seq_printf(m, ">>> socket %p", sk->sk_socket);
71003 seq_printf(m, " / sk %p", sk);
71004 seq_printf(m, " / bo %p", bo);
71005+#endif
71006 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71007 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71008 seq_printf(m, " <<<\n");
71009diff -urNp linux-2.6.32.46/net/core/dev.c linux-2.6.32.46/net/core/dev.c
71010--- linux-2.6.32.46/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71011+++ linux-2.6.32.46/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71012@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71013 if (no_module && capable(CAP_NET_ADMIN))
71014 no_module = request_module("netdev-%s", name);
71015 if (no_module && capable(CAP_SYS_MODULE)) {
71016+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71017+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71018+#else
71019 if (!request_module("%s", name))
71020 pr_err("Loading kernel module for a network device "
71021 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71022 "instead\n", name);
71023+#endif
71024 }
71025 }
71026 EXPORT_SYMBOL(dev_load);
71027@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71028
71029 struct dev_gso_cb {
71030 void (*destructor)(struct sk_buff *skb);
71031-};
71032+} __no_const;
71033
71034 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71035
71036@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71037 }
71038 EXPORT_SYMBOL(netif_rx_ni);
71039
71040-static void net_tx_action(struct softirq_action *h)
71041+static void net_tx_action(void)
71042 {
71043 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71044
71045@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71046 EXPORT_SYMBOL(netif_napi_del);
71047
71048
71049-static void net_rx_action(struct softirq_action *h)
71050+static void net_rx_action(void)
71051 {
71052 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71053 unsigned long time_limit = jiffies + 2;
71054diff -urNp linux-2.6.32.46/net/core/flow.c linux-2.6.32.46/net/core/flow.c
71055--- linux-2.6.32.46/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71056+++ linux-2.6.32.46/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71057@@ -35,11 +35,11 @@ struct flow_cache_entry {
71058 atomic_t *object_ref;
71059 };
71060
71061-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71062+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71063
71064 static u32 flow_hash_shift;
71065 #define flow_hash_size (1 << flow_hash_shift)
71066-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71067+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71068
71069 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71070
71071@@ -52,7 +52,7 @@ struct flow_percpu_info {
71072 u32 hash_rnd;
71073 int count;
71074 };
71075-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71076+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71077
71078 #define flow_hash_rnd_recalc(cpu) \
71079 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71080@@ -69,7 +69,7 @@ struct flow_flush_info {
71081 atomic_t cpuleft;
71082 struct completion completion;
71083 };
71084-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71085+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71086
71087 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71088
71089@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71090 if (fle->family == family &&
71091 fle->dir == dir &&
71092 flow_key_compare(key, &fle->key) == 0) {
71093- if (fle->genid == atomic_read(&flow_cache_genid)) {
71094+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71095 void *ret = fle->object;
71096
71097 if (ret)
71098@@ -228,7 +228,7 @@ nocache:
71099 err = resolver(net, key, family, dir, &obj, &obj_ref);
71100
71101 if (fle && !err) {
71102- fle->genid = atomic_read(&flow_cache_genid);
71103+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71104
71105 if (fle->object)
71106 atomic_dec(fle->object_ref);
71107@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71108
71109 fle = flow_table(cpu)[i];
71110 for (; fle; fle = fle->next) {
71111- unsigned genid = atomic_read(&flow_cache_genid);
71112+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71113
71114 if (!fle->object || fle->genid == genid)
71115 continue;
71116diff -urNp linux-2.6.32.46/net/core/rtnetlink.c linux-2.6.32.46/net/core/rtnetlink.c
71117--- linux-2.6.32.46/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71118+++ linux-2.6.32.46/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71119@@ -57,7 +57,7 @@ struct rtnl_link
71120 {
71121 rtnl_doit_func doit;
71122 rtnl_dumpit_func dumpit;
71123-};
71124+} __no_const;
71125
71126 static DEFINE_MUTEX(rtnl_mutex);
71127
71128diff -urNp linux-2.6.32.46/net/core/secure_seq.c linux-2.6.32.46/net/core/secure_seq.c
71129--- linux-2.6.32.46/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71130+++ linux-2.6.32.46/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71131@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71132 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71133
71134 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71135- __be16 dport)
71136+ __be16 dport)
71137 {
71138 u32 secret[MD5_MESSAGE_BYTES / 4];
71139 u32 hash[MD5_DIGEST_WORDS];
71140@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71141 secret[i] = net_secret[i];
71142
71143 md5_transform(hash, secret);
71144-
71145 return hash[0];
71146 }
71147 #endif
71148diff -urNp linux-2.6.32.46/net/core/skbuff.c linux-2.6.32.46/net/core/skbuff.c
71149--- linux-2.6.32.46/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71150+++ linux-2.6.32.46/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71151@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71152 struct sk_buff *frag_iter;
71153 struct sock *sk = skb->sk;
71154
71155+ pax_track_stack();
71156+
71157 /*
71158 * __skb_splice_bits() only fails if the output has no room left,
71159 * so no point in going over the frag_list for the error case.
71160diff -urNp linux-2.6.32.46/net/core/sock.c linux-2.6.32.46/net/core/sock.c
71161--- linux-2.6.32.46/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71162+++ linux-2.6.32.46/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71163@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71164 break;
71165
71166 case SO_PEERCRED:
71167+ {
71168+ struct ucred peercred;
71169 if (len > sizeof(sk->sk_peercred))
71170 len = sizeof(sk->sk_peercred);
71171- if (copy_to_user(optval, &sk->sk_peercred, len))
71172+ peercred = sk->sk_peercred;
71173+ if (copy_to_user(optval, &peercred, len))
71174 return -EFAULT;
71175 goto lenout;
71176+ }
71177
71178 case SO_PEERNAME:
71179 {
71180@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71181 */
71182 smp_wmb();
71183 atomic_set(&sk->sk_refcnt, 1);
71184- atomic_set(&sk->sk_drops, 0);
71185+ atomic_set_unchecked(&sk->sk_drops, 0);
71186 }
71187 EXPORT_SYMBOL(sock_init_data);
71188
71189diff -urNp linux-2.6.32.46/net/decnet/sysctl_net_decnet.c linux-2.6.32.46/net/decnet/sysctl_net_decnet.c
71190--- linux-2.6.32.46/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71191+++ linux-2.6.32.46/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71192@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71193
71194 if (len > *lenp) len = *lenp;
71195
71196- if (copy_to_user(buffer, addr, len))
71197+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71198 return -EFAULT;
71199
71200 *lenp = len;
71201@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71202
71203 if (len > *lenp) len = *lenp;
71204
71205- if (copy_to_user(buffer, devname, len))
71206+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71207 return -EFAULT;
71208
71209 *lenp = len;
71210diff -urNp linux-2.6.32.46/net/econet/Kconfig linux-2.6.32.46/net/econet/Kconfig
71211--- linux-2.6.32.46/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71212+++ linux-2.6.32.46/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71213@@ -4,7 +4,7 @@
71214
71215 config ECONET
71216 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71217- depends on EXPERIMENTAL && INET
71218+ depends on EXPERIMENTAL && INET && BROKEN
71219 ---help---
71220 Econet is a fairly old and slow networking protocol mainly used by
71221 Acorn computers to access file and print servers. It uses native
71222diff -urNp linux-2.6.32.46/net/ieee802154/dgram.c linux-2.6.32.46/net/ieee802154/dgram.c
71223--- linux-2.6.32.46/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71224+++ linux-2.6.32.46/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71225@@ -318,7 +318,7 @@ out:
71226 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71227 {
71228 if (sock_queue_rcv_skb(sk, skb) < 0) {
71229- atomic_inc(&sk->sk_drops);
71230+ atomic_inc_unchecked(&sk->sk_drops);
71231 kfree_skb(skb);
71232 return NET_RX_DROP;
71233 }
71234diff -urNp linux-2.6.32.46/net/ieee802154/raw.c linux-2.6.32.46/net/ieee802154/raw.c
71235--- linux-2.6.32.46/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71236+++ linux-2.6.32.46/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71237@@ -206,7 +206,7 @@ out:
71238 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71239 {
71240 if (sock_queue_rcv_skb(sk, skb) < 0) {
71241- atomic_inc(&sk->sk_drops);
71242+ atomic_inc_unchecked(&sk->sk_drops);
71243 kfree_skb(skb);
71244 return NET_RX_DROP;
71245 }
71246diff -urNp linux-2.6.32.46/net/ipv4/inet_diag.c linux-2.6.32.46/net/ipv4/inet_diag.c
71247--- linux-2.6.32.46/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71248+++ linux-2.6.32.46/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71249@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71250 r->idiag_retrans = 0;
71251
71252 r->id.idiag_if = sk->sk_bound_dev_if;
71253+#ifdef CONFIG_GRKERNSEC_HIDESYM
71254+ r->id.idiag_cookie[0] = 0;
71255+ r->id.idiag_cookie[1] = 0;
71256+#else
71257 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71258 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71259+#endif
71260
71261 r->id.idiag_sport = inet->sport;
71262 r->id.idiag_dport = inet->dport;
71263@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71264 r->idiag_family = tw->tw_family;
71265 r->idiag_retrans = 0;
71266 r->id.idiag_if = tw->tw_bound_dev_if;
71267+
71268+#ifdef CONFIG_GRKERNSEC_HIDESYM
71269+ r->id.idiag_cookie[0] = 0;
71270+ r->id.idiag_cookie[1] = 0;
71271+#else
71272 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71273 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71274+#endif
71275+
71276 r->id.idiag_sport = tw->tw_sport;
71277 r->id.idiag_dport = tw->tw_dport;
71278 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71279@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71280 if (sk == NULL)
71281 goto unlock;
71282
71283+#ifndef CONFIG_GRKERNSEC_HIDESYM
71284 err = -ESTALE;
71285 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71286 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71287 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71288 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71289 goto out;
71290+#endif
71291
71292 err = -ENOMEM;
71293 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71294@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71295 r->idiag_retrans = req->retrans;
71296
71297 r->id.idiag_if = sk->sk_bound_dev_if;
71298+
71299+#ifdef CONFIG_GRKERNSEC_HIDESYM
71300+ r->id.idiag_cookie[0] = 0;
71301+ r->id.idiag_cookie[1] = 0;
71302+#else
71303 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71304 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71305+#endif
71306
71307 tmo = req->expires - jiffies;
71308 if (tmo < 0)
71309diff -urNp linux-2.6.32.46/net/ipv4/inet_hashtables.c linux-2.6.32.46/net/ipv4/inet_hashtables.c
71310--- linux-2.6.32.46/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71311+++ linux-2.6.32.46/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71312@@ -18,12 +18,15 @@
71313 #include <linux/sched.h>
71314 #include <linux/slab.h>
71315 #include <linux/wait.h>
71316+#include <linux/security.h>
71317
71318 #include <net/inet_connection_sock.h>
71319 #include <net/inet_hashtables.h>
71320 #include <net/secure_seq.h>
71321 #include <net/ip.h>
71322
71323+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71324+
71325 /*
71326 * Allocate and initialize a new local port bind bucket.
71327 * The bindhash mutex for snum's hash chain must be held here.
71328@@ -491,6 +494,8 @@ ok:
71329 }
71330 spin_unlock(&head->lock);
71331
71332+ gr_update_task_in_ip_table(current, inet_sk(sk));
71333+
71334 if (tw) {
71335 inet_twsk_deschedule(tw, death_row);
71336 inet_twsk_put(tw);
71337diff -urNp linux-2.6.32.46/net/ipv4/inetpeer.c linux-2.6.32.46/net/ipv4/inetpeer.c
71338--- linux-2.6.32.46/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71339+++ linux-2.6.32.46/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71340@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71341 struct inet_peer *p, *n;
71342 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71343
71344+ pax_track_stack();
71345+
71346 /* Look up for the address quickly. */
71347 read_lock_bh(&peer_pool_lock);
71348 p = lookup(daddr, NULL);
71349@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71350 return NULL;
71351 n->v4daddr = daddr;
71352 atomic_set(&n->refcnt, 1);
71353- atomic_set(&n->rid, 0);
71354+ atomic_set_unchecked(&n->rid, 0);
71355 n->ip_id_count = secure_ip_id(daddr);
71356 n->tcp_ts_stamp = 0;
71357
71358diff -urNp linux-2.6.32.46/net/ipv4/ip_fragment.c linux-2.6.32.46/net/ipv4/ip_fragment.c
71359--- linux-2.6.32.46/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71360+++ linux-2.6.32.46/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71361@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71362 return 0;
71363
71364 start = qp->rid;
71365- end = atomic_inc_return(&peer->rid);
71366+ end = atomic_inc_return_unchecked(&peer->rid);
71367 qp->rid = end;
71368
71369 rc = qp->q.fragments && (end - start) > max;
71370diff -urNp linux-2.6.32.46/net/ipv4/ip_sockglue.c linux-2.6.32.46/net/ipv4/ip_sockglue.c
71371--- linux-2.6.32.46/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71372+++ linux-2.6.32.46/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71373@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71374 int val;
71375 int len;
71376
71377+ pax_track_stack();
71378+
71379 if (level != SOL_IP)
71380 return -EOPNOTSUPP;
71381
71382diff -urNp linux-2.6.32.46/net/ipv4/netfilter/arp_tables.c linux-2.6.32.46/net/ipv4/netfilter/arp_tables.c
71383--- linux-2.6.32.46/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71384+++ linux-2.6.32.46/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71385@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71386 private = &tmp;
71387 }
71388 #endif
71389+ memset(&info, 0, sizeof(info));
71390 info.valid_hooks = t->valid_hooks;
71391 memcpy(info.hook_entry, private->hook_entry,
71392 sizeof(info.hook_entry));
71393diff -urNp linux-2.6.32.46/net/ipv4/netfilter/ip_queue.c linux-2.6.32.46/net/ipv4/netfilter/ip_queue.c
71394--- linux-2.6.32.46/net/ipv4/netfilter/ip_queue.c 2011-03-27 14:31:47.000000000 -0400
71395+++ linux-2.6.32.46/net/ipv4/netfilter/ip_queue.c 2011-08-21 18:42:53.000000000 -0400
71396@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
71397
71398 if (v->data_len < sizeof(*user_iph))
71399 return 0;
71400+ if (v->data_len > 65535)
71401+ return -EMSGSIZE;
71402+
71403 diff = v->data_len - e->skb->len;
71404 if (diff < 0) {
71405 if (pskb_trim(e->skb, v->data_len))
71406@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
71407 static inline void
71408 __ipq_rcv_skb(struct sk_buff *skb)
71409 {
71410- int status, type, pid, flags, nlmsglen, skblen;
71411+ int status, type, pid, flags;
71412+ unsigned int nlmsglen, skblen;
71413 struct nlmsghdr *nlh;
71414
71415 skblen = skb->len;
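The ip_queue.c hunks validate the verdict payload coming from user space: data_len is rejected above 65535 before it is used to grow or trim the skb, and nlmsglen/skblen become unsigned so the later size comparisons cannot be bypassed with negative values. The underlying pattern is simply "bound a caller-controlled length before trusting it"; a small model with illustrative limits:

#include <errno.h>
#include <stdio.h>

#define MAX_PAYLOAD 65535u

/* 0 if the user-supplied length may be used, negative errno otherwise */
static int check_user_len(unsigned int data_len, unsigned int min_hdr)
{
	if (data_len < min_hdr)
		return -EINVAL;      /* too small to hold the mandatory header */
	if (data_len > MAX_PAYLOAD)
		return -EMSGSIZE;    /* refuse absurd resize requests          */
	return 0;
}

int main(void)
{
	printf("%d\n", check_user_len(1500, 20));      /* 0         */
	printf("%d\n", check_user_len(8, 20));         /* -EINVAL   */
	printf("%d\n", check_user_len(1u << 20, 20));  /* -EMSGSIZE */
	return 0;
}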
71416diff -urNp linux-2.6.32.46/net/ipv4/netfilter/ip_tables.c linux-2.6.32.46/net/ipv4/netfilter/ip_tables.c
71417--- linux-2.6.32.46/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71418+++ linux-2.6.32.46/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71419@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71420 private = &tmp;
71421 }
71422 #endif
71423+ memset(&info, 0, sizeof(info));
71424 info.valid_hooks = t->valid_hooks;
71425 memcpy(info.hook_entry, private->hook_entry,
71426 sizeof(info.hook_entry));
71427diff -urNp linux-2.6.32.46/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.46/net/ipv4/netfilter/nf_nat_snmp_basic.c
71428--- linux-2.6.32.46/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71429+++ linux-2.6.32.46/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71430@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71431
71432 *len = 0;
71433
71434- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71435+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71436 if (*octets == NULL) {
71437 if (net_ratelimit())
71438 printk("OOM in bsalg (%d)\n", __LINE__);
71439diff -urNp linux-2.6.32.46/net/ipv4/raw.c linux-2.6.32.46/net/ipv4/raw.c
71440--- linux-2.6.32.46/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71441+++ linux-2.6.32.46/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71442@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71443 /* Charge it to the socket. */
71444
71445 if (sock_queue_rcv_skb(sk, skb) < 0) {
71446- atomic_inc(&sk->sk_drops);
71447+ atomic_inc_unchecked(&sk->sk_drops);
71448 kfree_skb(skb);
71449 return NET_RX_DROP;
71450 }
71451@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71452 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71453 {
71454 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71455- atomic_inc(&sk->sk_drops);
71456+ atomic_inc_unchecked(&sk->sk_drops);
71457 kfree_skb(skb);
71458 return NET_RX_DROP;
71459 }
71460@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71461
71462 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71463 {
71464+ struct icmp_filter filter;
71465+
71466+ if (optlen < 0)
71467+ return -EINVAL;
71468 if (optlen > sizeof(struct icmp_filter))
71469 optlen = sizeof(struct icmp_filter);
71470- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71471+ if (copy_from_user(&filter, optval, optlen))
71472 return -EFAULT;
71473+ raw_sk(sk)->filter = filter;
71474+
71475 return 0;
71476 }
71477
71478 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71479 {
71480 int len, ret = -EFAULT;
71481+ struct icmp_filter filter;
71482
71483 if (get_user(len, optlen))
71484 goto out;
71485@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71486 if (len > sizeof(struct icmp_filter))
71487 len = sizeof(struct icmp_filter);
71488 ret = -EFAULT;
71489- if (put_user(len, optlen) ||
71490- copy_to_user(optval, &raw_sk(sk)->filter, len))
71491+ filter = raw_sk(sk)->filter;
71492+ if (put_user(len, optlen) || len > sizeof filter ||
71493+ copy_to_user(optval, &filter, len))
71494 goto out;
71495 ret = 0;
71496 out: return ret;
71497@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71498 sk_wmem_alloc_get(sp),
71499 sk_rmem_alloc_get(sp),
71500 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71501- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71502+ atomic_read(&sp->sk_refcnt),
71503+#ifdef CONFIG_GRKERNSEC_HIDESYM
71504+ NULL,
71505+#else
71506+ sp,
71507+#endif
71508+ atomic_read_unchecked(&sp->sk_drops));
71509 }
71510
71511 static int raw_seq_show(struct seq_file *seq, void *v)
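The raw.c hunks stop copying directly between user space and raw_sk(sk)->filter: setsockopt rejects a negative optlen, clamps it to sizeof(struct icmp_filter), copies into a stack-local filter and only then commits it, and getsockopt likewise snapshots the filter and bounds the length before copying out. The same round trip in plain C, with memcpy standing in for copy_{from,to}_user and an invented one-word filter:

#include <stdio.h>
#include <string.h>

struct icmp_filter_model { unsigned int data; };   /* stand-in for the real filter    */

static struct icmp_filter_model sock_filter;       /* stand-in for raw_sk(sk)->filter */

static int set_filter(const void *user_buf, int optlen)
{
	struct icmp_filter_model tmp = { 0 };

	if (optlen < 0)
		return -1;                           /* reject negative lengths  */
	if ((size_t)optlen > sizeof(tmp))
		optlen = (int)sizeof(tmp);           /* clamp to the filter size */
	memcpy(&tmp, user_buf, (size_t)optlen);  /* copy_from_user stand-in  */
	sock_filter = tmp;                       /* commit the validated copy */
	return 0;
}

static int get_filter(void *user_buf, int len)
{
	struct icmp_filter_model tmp = sock_filter;   /* snapshot, never the live object */

	if (len < 0 || (size_t)len > sizeof(tmp))
		return -1;
	memcpy(user_buf, &tmp, (size_t)len);          /* copy_to_user stand-in */
	return 0;
}

int main(void)
{
	unsigned int in = 0xdeadbeefu, out = 0;

	printf("set: %d\n", set_filter(&in, sizeof(in)));
	printf("get: %d, filter=%#x\n", get_filter(&out, sizeof(out)), out);
	return 0;
}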
71512diff -urNp linux-2.6.32.46/net/ipv4/route.c linux-2.6.32.46/net/ipv4/route.c
71513--- linux-2.6.32.46/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71514+++ linux-2.6.32.46/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71515@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71516
71517 static inline int rt_genid(struct net *net)
71518 {
71519- return atomic_read(&net->ipv4.rt_genid);
71520+ return atomic_read_unchecked(&net->ipv4.rt_genid);
71521 }
71522
71523 #ifdef CONFIG_PROC_FS
71524@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71525 unsigned char shuffle;
71526
71527 get_random_bytes(&shuffle, sizeof(shuffle));
71528- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71529+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71530 }
71531
71532 /*
71533@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71534
71535 static __net_init int rt_secret_timer_init(struct net *net)
71536 {
71537- atomic_set(&net->ipv4.rt_genid,
71538+ atomic_set_unchecked(&net->ipv4.rt_genid,
71539 (int) ((num_physpages ^ (num_physpages>>8)) ^
71540 (jiffies ^ (jiffies >> 7))));
71541
71542diff -urNp linux-2.6.32.46/net/ipv4/tcp.c linux-2.6.32.46/net/ipv4/tcp.c
71543--- linux-2.6.32.46/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71544+++ linux-2.6.32.46/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71545@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71546 int val;
71547 int err = 0;
71548
71549+ pax_track_stack();
71550+
71551 /* This is a string value all the others are int's */
71552 if (optname == TCP_CONGESTION) {
71553 char name[TCP_CA_NAME_MAX];
71554@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71555 struct tcp_sock *tp = tcp_sk(sk);
71556 int val, len;
71557
71558+ pax_track_stack();
71559+
71560 if (get_user(len, optlen))
71561 return -EFAULT;
71562
71563diff -urNp linux-2.6.32.46/net/ipv4/tcp_ipv4.c linux-2.6.32.46/net/ipv4/tcp_ipv4.c
71564--- linux-2.6.32.46/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71565+++ linux-2.6.32.46/net/ipv4/tcp_ipv4.c 2011-08-23 21:22:32.000000000 -0400
71566@@ -85,6 +85,9 @@
71567 int sysctl_tcp_tw_reuse __read_mostly;
71568 int sysctl_tcp_low_latency __read_mostly;
71569
71570+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71571+extern int grsec_enable_blackhole;
71572+#endif
71573
71574 #ifdef CONFIG_TCP_MD5SIG
71575 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71576@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71577 return 0;
71578
71579 reset:
71580+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71581+ if (!grsec_enable_blackhole)
71582+#endif
71583 tcp_v4_send_reset(rsk, skb);
71584 discard:
71585 kfree_skb(skb);
71586@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
71587 TCP_SKB_CB(skb)->sacked = 0;
71588
71589 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
71590- if (!sk)
71591+ if (!sk) {
71592+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71593+ ret = 1;
71594+#endif
71595 goto no_tcp_socket;
71596+ }
71597
71598 process:
71599- if (sk->sk_state == TCP_TIME_WAIT)
71600+ if (sk->sk_state == TCP_TIME_WAIT) {
71601+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71602+ ret = 2;
71603+#endif
71604 goto do_time_wait;
71605+ }
71606
71607 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
71608 goto discard_and_relse;
71609@@ -1651,6 +1665,10 @@ no_tcp_socket:
71610 bad_packet:
71611 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
71612 } else {
71613+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71614+ if (!grsec_enable_blackhole || (ret == 1 &&
71615+ (skb->dev->flags & IFF_LOOPBACK)))
71616+#endif
71617 tcp_v4_send_reset(NULL, skb);
71618 }
71619
71620@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
71621 0, /* non standard timer */
71622 0, /* open_requests have no inode */
71623 atomic_read(&sk->sk_refcnt),
71624+#ifdef CONFIG_GRKERNSEC_HIDESYM
71625+ NULL,
71626+#else
71627 req,
71628+#endif
71629 len);
71630 }
71631
71632@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
71633 sock_i_uid(sk),
71634 icsk->icsk_probes_out,
71635 sock_i_ino(sk),
71636- atomic_read(&sk->sk_refcnt), sk,
71637+ atomic_read(&sk->sk_refcnt),
71638+#ifdef CONFIG_GRKERNSEC_HIDESYM
71639+ NULL,
71640+#else
71641+ sk,
71642+#endif
71643 jiffies_to_clock_t(icsk->icsk_rto),
71644 jiffies_to_clock_t(icsk->icsk_ack.ato),
71645 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
71646@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
71647 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
71648 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
71649 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
71650- atomic_read(&tw->tw_refcnt), tw, len);
71651+ atomic_read(&tw->tw_refcnt),
71652+#ifdef CONFIG_GRKERNSEC_HIDESYM
71653+ NULL,
71654+#else
71655+ tw,
71656+#endif
71657+ len);
71658 }
71659
71660 #define TMPSZ 150
71661diff -urNp linux-2.6.32.46/net/ipv4/tcp_minisocks.c linux-2.6.32.46/net/ipv4/tcp_minisocks.c
71662--- linux-2.6.32.46/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
71663+++ linux-2.6.32.46/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
71664@@ -26,6 +26,10 @@
71665 #include <net/inet_common.h>
71666 #include <net/xfrm.h>
71667
71668+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71669+extern int grsec_enable_blackhole;
71670+#endif
71671+
71672 #ifdef CONFIG_SYSCTL
71673 #define SYNC_INIT 0 /* let the user enable it */
71674 #else
71675@@ -672,6 +676,10 @@ listen_overflow:
71676
71677 embryonic_reset:
71678 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
71679+
71680+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71681+ if (!grsec_enable_blackhole)
71682+#endif
71683 if (!(flg & TCP_FLAG_RST))
71684 req->rsk_ops->send_reset(sk, skb);
71685
71686diff -urNp linux-2.6.32.46/net/ipv4/tcp_output.c linux-2.6.32.46/net/ipv4/tcp_output.c
71687--- linux-2.6.32.46/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
71688+++ linux-2.6.32.46/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
71689@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
71690 __u8 *md5_hash_location;
71691 int mss;
71692
71693+ pax_track_stack();
71694+
71695 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
71696 if (skb == NULL)
71697 return NULL;
71698diff -urNp linux-2.6.32.46/net/ipv4/tcp_probe.c linux-2.6.32.46/net/ipv4/tcp_probe.c
71699--- linux-2.6.32.46/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
71700+++ linux-2.6.32.46/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
71701@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
71702 if (cnt + width >= len)
71703 break;
71704
71705- if (copy_to_user(buf + cnt, tbuf, width))
71706+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
71707 return -EFAULT;
71708 cnt += width;
71709 }
71710diff -urNp linux-2.6.32.46/net/ipv4/tcp_timer.c linux-2.6.32.46/net/ipv4/tcp_timer.c
71711--- linux-2.6.32.46/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
71712+++ linux-2.6.32.46/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
71713@@ -21,6 +21,10 @@
71714 #include <linux/module.h>
71715 #include <net/tcp.h>
71716
71717+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71718+extern int grsec_lastack_retries;
71719+#endif
71720+
71721 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
71722 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
71723 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
71724@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
71725 }
71726 }
71727
71728+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71729+ if ((sk->sk_state == TCP_LAST_ACK) &&
71730+ (grsec_lastack_retries > 0) &&
71731+ (grsec_lastack_retries < retry_until))
71732+ retry_until = grsec_lastack_retries;
71733+#endif
71734+
71735 if (retransmits_timed_out(sk, retry_until)) {
71736 /* Has it gone just too far? */
71737 tcp_write_err(sk);
71738diff -urNp linux-2.6.32.46/net/ipv4/udp.c linux-2.6.32.46/net/ipv4/udp.c
71739--- linux-2.6.32.46/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
71740+++ linux-2.6.32.46/net/ipv4/udp.c 2011-08-23 21:22:32.000000000 -0400
71741@@ -86,6 +86,7 @@
71742 #include <linux/types.h>
71743 #include <linux/fcntl.h>
71744 #include <linux/module.h>
71745+#include <linux/security.h>
71746 #include <linux/socket.h>
71747 #include <linux/sockios.h>
71748 #include <linux/igmp.h>
71749@@ -106,6 +107,10 @@
71750 #include <net/xfrm.h>
71751 #include "udp_impl.h"
71752
71753+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71754+extern int grsec_enable_blackhole;
71755+#endif
71756+
71757 struct udp_table udp_table;
71758 EXPORT_SYMBOL(udp_table);
71759
71760@@ -371,6 +376,9 @@ found:
71761 return s;
71762 }
71763
71764+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
71765+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
71766+
71767 /*
71768 * This routine is called by the ICMP module when it gets some
71769 * sort of error condition. If err < 0 then the socket should
71770@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
71771 dport = usin->sin_port;
71772 if (dport == 0)
71773 return -EINVAL;
71774+
71775+ err = gr_search_udp_sendmsg(sk, usin);
71776+ if (err)
71777+ return err;
71778 } else {
71779 if (sk->sk_state != TCP_ESTABLISHED)
71780 return -EDESTADDRREQ;
71781+
71782+ err = gr_search_udp_sendmsg(sk, NULL);
71783+ if (err)
71784+ return err;
71785+
71786 daddr = inet->daddr;
71787 dport = inet->dport;
71788 /* Open fast path for connected socket.
71789@@ -945,6 +962,10 @@ try_again:
71790 if (!skb)
71791 goto out;
71792
71793+ err = gr_search_udp_recvmsg(sk, skb);
71794+ if (err)
71795+ goto out_free;
71796+
71797 ulen = skb->len - sizeof(struct udphdr);
71798 copied = len;
71799 if (copied > ulen)
71800@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
71801 if (rc == -ENOMEM) {
71802 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
71803 is_udplite);
71804- atomic_inc(&sk->sk_drops);
71805+ atomic_inc_unchecked(&sk->sk_drops);
71806 }
71807 goto drop;
71808 }
71809@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
71810 goto csum_error;
71811
71812 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
71813+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71814+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
71815+#endif
71816 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
71817
71818 /*
71819@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
71820 sk_wmem_alloc_get(sp),
71821 sk_rmem_alloc_get(sp),
71822 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71823- atomic_read(&sp->sk_refcnt), sp,
71824- atomic_read(&sp->sk_drops), len);
71825+ atomic_read(&sp->sk_refcnt),
71826+#ifdef CONFIG_GRKERNSEC_HIDESYM
71827+ NULL,
71828+#else
71829+ sp,
71830+#endif
71831+ atomic_read_unchecked(&sp->sk_drops), len);
71832 }
71833
71834 int udp4_seq_show(struct seq_file *seq, void *v)
71835diff -urNp linux-2.6.32.46/net/ipv6/inet6_connection_sock.c linux-2.6.32.46/net/ipv6/inet6_connection_sock.c
71836--- linux-2.6.32.46/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
71837+++ linux-2.6.32.46/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
71838@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
71839 #ifdef CONFIG_XFRM
71840 {
71841 struct rt6_info *rt = (struct rt6_info *)dst;
71842- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
71843+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
71844 }
71845 #endif
71846 }
71847@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
71848 #ifdef CONFIG_XFRM
71849 if (dst) {
71850 struct rt6_info *rt = (struct rt6_info *)dst;
71851- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
71852+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
71853 sk->sk_dst_cache = NULL;
71854 dst_release(dst);
71855 dst = NULL;
71856diff -urNp linux-2.6.32.46/net/ipv6/inet6_hashtables.c linux-2.6.32.46/net/ipv6/inet6_hashtables.c
71857--- linux-2.6.32.46/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71858+++ linux-2.6.32.46/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
71859@@ -119,7 +119,7 @@ out:
71860 }
71861 EXPORT_SYMBOL(__inet6_lookup_established);
71862
71863-static int inline compute_score(struct sock *sk, struct net *net,
71864+static inline int compute_score(struct sock *sk, struct net *net,
71865 const unsigned short hnum,
71866 const struct in6_addr *daddr,
71867 const int dif)
71868diff -urNp linux-2.6.32.46/net/ipv6/ip6_tunnel.c linux-2.6.32.46/net/ipv6/ip6_tunnel.c
71869--- linux-2.6.32.46/net/ipv6/ip6_tunnel.c 2011-08-09 18:35:30.000000000 -0400
71870+++ linux-2.6.32.46/net/ipv6/ip6_tunnel.c 2011-08-24 18:52:25.000000000 -0400
71871@@ -1466,7 +1466,7 @@ static int __init ip6_tunnel_init(void)
71872 {
71873 int err;
71874
71875- err = register_pernet_device(&ip6_tnl_net_ops);
71876+ err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
71877 if (err < 0)
71878 goto out_pernet;
71879
71880@@ -1487,7 +1487,7 @@ static int __init ip6_tunnel_init(void)
71881 out_ip6ip6:
71882 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
71883 out_ip4ip6:
71884- unregister_pernet_device(&ip6_tnl_net_ops);
71885+ unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops);
71886 out_pernet:
71887 return err;
71888 }
71889diff -urNp linux-2.6.32.46/net/ipv6/ipv6_sockglue.c linux-2.6.32.46/net/ipv6/ipv6_sockglue.c
71890--- linux-2.6.32.46/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71891+++ linux-2.6.32.46/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71892@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
71893 int val, valbool;
71894 int retv = -ENOPROTOOPT;
71895
71896+ pax_track_stack();
71897+
71898 if (optval == NULL)
71899 val=0;
71900 else {
71901@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
71902 int len;
71903 int val;
71904
71905+ pax_track_stack();
71906+
71907 if (ip6_mroute_opt(optname))
71908 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
71909
71910diff -urNp linux-2.6.32.46/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.46/net/ipv6/netfilter/ip6_queue.c
71911--- linux-2.6.32.46/net/ipv6/netfilter/ip6_queue.c 2011-03-27 14:31:47.000000000 -0400
71912+++ linux-2.6.32.46/net/ipv6/netfilter/ip6_queue.c 2011-08-21 18:43:32.000000000 -0400
71913@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
71914
71915 if (v->data_len < sizeof(*user_iph))
71916 return 0;
71917+ if (v->data_len > 65535)
71918+ return -EMSGSIZE;
71919+
71920 diff = v->data_len - e->skb->len;
71921 if (diff < 0) {
71922 if (pskb_trim(e->skb, v->data_len))
71923@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
71924 static inline void
71925 __ipq_rcv_skb(struct sk_buff *skb)
71926 {
71927- int status, type, pid, flags, nlmsglen, skblen;
71928+ int status, type, pid, flags;
71929+ unsigned int nlmsglen, skblen;
71930 struct nlmsghdr *nlh;
71931
71932 skblen = skb->len;
71933diff -urNp linux-2.6.32.46/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.46/net/ipv6/netfilter/ip6_tables.c
71934--- linux-2.6.32.46/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
71935+++ linux-2.6.32.46/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
71936@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
71937 private = &tmp;
71938 }
71939 #endif
71940+ memset(&info, 0, sizeof(info));
71941 info.valid_hooks = t->valid_hooks;
71942 memcpy(info.hook_entry, private->hook_entry,
71943 sizeof(info.hook_entry));
71944diff -urNp linux-2.6.32.46/net/ipv6/raw.c linux-2.6.32.46/net/ipv6/raw.c
71945--- linux-2.6.32.46/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
71946+++ linux-2.6.32.46/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
71947@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
71948 {
71949 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
71950 skb_checksum_complete(skb)) {
71951- atomic_inc(&sk->sk_drops);
71952+ atomic_inc_unchecked(&sk->sk_drops);
71953 kfree_skb(skb);
71954 return NET_RX_DROP;
71955 }
71956
71957 /* Charge it to the socket. */
71958 if (sock_queue_rcv_skb(sk,skb)<0) {
71959- atomic_inc(&sk->sk_drops);
71960+ atomic_inc_unchecked(&sk->sk_drops);
71961 kfree_skb(skb);
71962 return NET_RX_DROP;
71963 }
71964@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71965 struct raw6_sock *rp = raw6_sk(sk);
71966
71967 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
71968- atomic_inc(&sk->sk_drops);
71969+ atomic_inc_unchecked(&sk->sk_drops);
71970 kfree_skb(skb);
71971 return NET_RX_DROP;
71972 }
71973@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
71974
71975 if (inet->hdrincl) {
71976 if (skb_checksum_complete(skb)) {
71977- atomic_inc(&sk->sk_drops);
71978+ atomic_inc_unchecked(&sk->sk_drops);
71979 kfree_skb(skb);
71980 return NET_RX_DROP;
71981 }
71982@@ -518,7 +518,7 @@ csum_copy_err:
71983 as some normal condition.
71984 */
71985 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
71986- atomic_inc(&sk->sk_drops);
71987+ atomic_inc_unchecked(&sk->sk_drops);
71988 goto out;
71989 }
71990
71991@@ -600,7 +600,7 @@ out:
71992 return err;
71993 }
71994
71995-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
71996+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
71997 struct flowi *fl, struct rt6_info *rt,
71998 unsigned int flags)
71999 {
72000@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72001 u16 proto;
72002 int err;
72003
72004+ pax_track_stack();
72005+
72006 /* Rough check on arithmetic overflow,
72007 better check is made in ip6_append_data().
72008 */
72009@@ -916,12 +918,17 @@ do_confirm:
72010 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72011 char __user *optval, int optlen)
72012 {
72013+ struct icmp6_filter filter;
72014+
72015 switch (optname) {
72016 case ICMPV6_FILTER:
72017+ if (optlen < 0)
72018+ return -EINVAL;
72019 if (optlen > sizeof(struct icmp6_filter))
72020 optlen = sizeof(struct icmp6_filter);
72021- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72022+ if (copy_from_user(&filter, optval, optlen))
72023 return -EFAULT;
72024+ raw6_sk(sk)->filter = filter;
72025 return 0;
72026 default:
72027 return -ENOPROTOOPT;
72028@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72029 char __user *optval, int __user *optlen)
72030 {
72031 int len;
72032+ struct icmp6_filter filter;
72033
72034 switch (optname) {
72035 case ICMPV6_FILTER:
72036@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72037 len = sizeof(struct icmp6_filter);
72038 if (put_user(len, optlen))
72039 return -EFAULT;
72040- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72041+ filter = raw6_sk(sk)->filter;
72042+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72043 return -EFAULT;
72044 return 0;
72045 default:
72046@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72047 0, 0L, 0,
72048 sock_i_uid(sp), 0,
72049 sock_i_ino(sp),
72050- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72051+ atomic_read(&sp->sk_refcnt),
72052+#ifdef CONFIG_GRKERNSEC_HIDESYM
72053+ NULL,
72054+#else
72055+ sp,
72056+#endif
72057+ atomic_read_unchecked(&sp->sk_drops));
72058 }
72059
72060 static int raw6_seq_show(struct seq_file *seq, void *v)
72061diff -urNp linux-2.6.32.46/net/ipv6/tcp_ipv6.c linux-2.6.32.46/net/ipv6/tcp_ipv6.c
72062--- linux-2.6.32.46/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72063+++ linux-2.6.32.46/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72064@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72065 }
72066 #endif
72067
72068+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72069+extern int grsec_enable_blackhole;
72070+#endif
72071+
72072 static void tcp_v6_hash(struct sock *sk)
72073 {
72074 if (sk->sk_state != TCP_CLOSE) {
72075@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72076 return 0;
72077
72078 reset:
72079+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72080+ if (!grsec_enable_blackhole)
72081+#endif
72082 tcp_v6_send_reset(sk, skb);
72083 discard:
72084 if (opt_skb)
72085@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72086 TCP_SKB_CB(skb)->sacked = 0;
72087
72088 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72089- if (!sk)
72090+ if (!sk) {
72091+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72092+ ret = 1;
72093+#endif
72094 goto no_tcp_socket;
72095+ }
72096
72097 process:
72098- if (sk->sk_state == TCP_TIME_WAIT)
72099+ if (sk->sk_state == TCP_TIME_WAIT) {
72100+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72101+ ret = 2;
72102+#endif
72103 goto do_time_wait;
72104+ }
72105
72106 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72107 goto discard_and_relse;
72108@@ -1701,6 +1716,10 @@ no_tcp_socket:
72109 bad_packet:
72110 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72111 } else {
72112+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72113+ if (!grsec_enable_blackhole || (ret == 1 &&
72114+ (skb->dev->flags & IFF_LOOPBACK)))
72115+#endif
72116 tcp_v6_send_reset(NULL, skb);
72117 }
72118
72119@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72120 uid,
72121 0, /* non standard timer */
72122 0, /* open_requests have no inode */
72123- 0, req);
72124+ 0,
72125+#ifdef CONFIG_GRKERNSEC_HIDESYM
72126+ NULL
72127+#else
72128+ req
72129+#endif
72130+ );
72131 }
72132
72133 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72134@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72135 sock_i_uid(sp),
72136 icsk->icsk_probes_out,
72137 sock_i_ino(sp),
72138- atomic_read(&sp->sk_refcnt), sp,
72139+ atomic_read(&sp->sk_refcnt),
72140+#ifdef CONFIG_GRKERNSEC_HIDESYM
72141+ NULL,
72142+#else
72143+ sp,
72144+#endif
72145 jiffies_to_clock_t(icsk->icsk_rto),
72146 jiffies_to_clock_t(icsk->icsk_ack.ato),
72147 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72148@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72149 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72150 tw->tw_substate, 0, 0,
72151 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72152- atomic_read(&tw->tw_refcnt), tw);
72153+ atomic_read(&tw->tw_refcnt),
72154+#ifdef CONFIG_GRKERNSEC_HIDESYM
72155+ NULL
72156+#else
72157+ tw
72158+#endif
72159+ );
72160 }
72161
72162 static int tcp6_seq_show(struct seq_file *seq, void *v)
72163diff -urNp linux-2.6.32.46/net/ipv6/udp.c linux-2.6.32.46/net/ipv6/udp.c
72164--- linux-2.6.32.46/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72165+++ linux-2.6.32.46/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72166@@ -49,6 +49,10 @@
72167 #include <linux/seq_file.h>
72168 #include "udp_impl.h"
72169
72170+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72171+extern int grsec_enable_blackhole;
72172+#endif
72173+
72174 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72175 {
72176 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72177@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72178 if (rc == -ENOMEM) {
72179 UDP6_INC_STATS_BH(sock_net(sk),
72180 UDP_MIB_RCVBUFERRORS, is_udplite);
72181- atomic_inc(&sk->sk_drops);
72182+ atomic_inc_unchecked(&sk->sk_drops);
72183 }
72184 goto drop;
72185 }
72186@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72187 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72188 proto == IPPROTO_UDPLITE);
72189
72190+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72191+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72192+#endif
72193 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72194
72195 kfree_skb(skb);
72196@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72197 0, 0L, 0,
72198 sock_i_uid(sp), 0,
72199 sock_i_ino(sp),
72200- atomic_read(&sp->sk_refcnt), sp,
72201- atomic_read(&sp->sk_drops));
72202+ atomic_read(&sp->sk_refcnt),
72203+#ifdef CONFIG_GRKERNSEC_HIDESYM
72204+ NULL,
72205+#else
72206+ sp,
72207+#endif
72208+ atomic_read_unchecked(&sp->sk_drops));
72209 }
72210
72211 int udp6_seq_show(struct seq_file *seq, void *v)
72212diff -urNp linux-2.6.32.46/net/irda/ircomm/ircomm_tty.c linux-2.6.32.46/net/irda/ircomm/ircomm_tty.c
72213--- linux-2.6.32.46/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72214+++ linux-2.6.32.46/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72215@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72216 add_wait_queue(&self->open_wait, &wait);
72217
72218 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72219- __FILE__,__LINE__, tty->driver->name, self->open_count );
72220+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72221
72222 /* As far as I can see, we protect open_count - Jean II */
72223 spin_lock_irqsave(&self->spinlock, flags);
72224 if (!tty_hung_up_p(filp)) {
72225 extra_count = 1;
72226- self->open_count--;
72227+ local_dec(&self->open_count);
72228 }
72229 spin_unlock_irqrestore(&self->spinlock, flags);
72230- self->blocked_open++;
72231+ local_inc(&self->blocked_open);
72232
72233 while (1) {
72234 if (tty->termios->c_cflag & CBAUD) {
72235@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72236 }
72237
72238 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72239- __FILE__,__LINE__, tty->driver->name, self->open_count );
72240+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72241
72242 schedule();
72243 }
72244@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72245 if (extra_count) {
72246 /* ++ is not atomic, so this should be protected - Jean II */
72247 spin_lock_irqsave(&self->spinlock, flags);
72248- self->open_count++;
72249+ local_inc(&self->open_count);
72250 spin_unlock_irqrestore(&self->spinlock, flags);
72251 }
72252- self->blocked_open--;
72253+ local_dec(&self->blocked_open);
72254
72255 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72256- __FILE__,__LINE__, tty->driver->name, self->open_count);
72257+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72258
72259 if (!retval)
72260 self->flags |= ASYNC_NORMAL_ACTIVE;
72261@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72262 }
72263 /* ++ is not atomic, so this should be protected - Jean II */
72264 spin_lock_irqsave(&self->spinlock, flags);
72265- self->open_count++;
72266+ local_inc(&self->open_count);
72267
72268 tty->driver_data = self;
72269 self->tty = tty;
72270 spin_unlock_irqrestore(&self->spinlock, flags);
72271
72272 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72273- self->line, self->open_count);
72274+ self->line, local_read(&self->open_count));
72275
72276 /* Not really used by us, but lets do it anyway */
72277 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72278@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72279 return;
72280 }
72281
72282- if ((tty->count == 1) && (self->open_count != 1)) {
72283+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72284 /*
72285 * Uh, oh. tty->count is 1, which means that the tty
72286 * structure will be freed. state->count should always
72287@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72288 */
72289 IRDA_DEBUG(0, "%s(), bad serial port count; "
72290 "tty->count is 1, state->count is %d\n", __func__ ,
72291- self->open_count);
72292- self->open_count = 1;
72293+ local_read(&self->open_count));
72294+ local_set(&self->open_count, 1);
72295 }
72296
72297- if (--self->open_count < 0) {
72298+ if (local_dec_return(&self->open_count) < 0) {
72299 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72300- __func__, self->line, self->open_count);
72301- self->open_count = 0;
72302+ __func__, self->line, local_read(&self->open_count));
72303+ local_set(&self->open_count, 0);
72304 }
72305- if (self->open_count) {
72306+ if (local_read(&self->open_count)) {
72307 spin_unlock_irqrestore(&self->spinlock, flags);
72308
72309 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72310@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72311 tty->closing = 0;
72312 self->tty = NULL;
72313
72314- if (self->blocked_open) {
72315+ if (local_read(&self->blocked_open)) {
72316 if (self->close_delay)
72317 schedule_timeout_interruptible(self->close_delay);
72318 wake_up_interruptible(&self->open_wait);
72319@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72320 spin_lock_irqsave(&self->spinlock, flags);
72321 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72322 self->tty = NULL;
72323- self->open_count = 0;
72324+ local_set(&self->open_count, 0);
72325 spin_unlock_irqrestore(&self->spinlock, flags);
72326
72327 wake_up_interruptible(&self->open_wait);
72328@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72329 seq_putc(m, '\n');
72330
72331 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72332- seq_printf(m, "Open count: %d\n", self->open_count);
72333+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72334 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72335 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72336
72337diff -urNp linux-2.6.32.46/net/iucv/af_iucv.c linux-2.6.32.46/net/iucv/af_iucv.c
72338--- linux-2.6.32.46/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72339+++ linux-2.6.32.46/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72340@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72341
72342 write_lock_bh(&iucv_sk_list.lock);
72343
72344- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72345+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72346 while (__iucv_get_sock_by_name(name)) {
72347 sprintf(name, "%08x",
72348- atomic_inc_return(&iucv_sk_list.autobind_name));
72349+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72350 }
72351
72352 write_unlock_bh(&iucv_sk_list.lock);
72353diff -urNp linux-2.6.32.46/net/key/af_key.c linux-2.6.32.46/net/key/af_key.c
72354--- linux-2.6.32.46/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72355+++ linux-2.6.32.46/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72356@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72357 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72358 struct xfrm_kmaddress k;
72359
72360+ pax_track_stack();
72361+
72362 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72363 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72364 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72365@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72366 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72367 else
72368 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72369+#ifdef CONFIG_GRKERNSEC_HIDESYM
72370+ NULL,
72371+#else
72372 s,
72373+#endif
72374 atomic_read(&s->sk_refcnt),
72375 sk_rmem_alloc_get(s),
72376 sk_wmem_alloc_get(s),
72377diff -urNp linux-2.6.32.46/net/lapb/lapb_iface.c linux-2.6.32.46/net/lapb/lapb_iface.c
72378--- linux-2.6.32.46/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72379+++ linux-2.6.32.46/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72380@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72381 goto out;
72382
72383 lapb->dev = dev;
72384- lapb->callbacks = *callbacks;
72385+ lapb->callbacks = callbacks;
72386
72387 __lapb_insert_cb(lapb);
72388
72389@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72390
72391 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72392 {
72393- if (lapb->callbacks.connect_confirmation)
72394- lapb->callbacks.connect_confirmation(lapb->dev, reason);
72395+ if (lapb->callbacks->connect_confirmation)
72396+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
72397 }
72398
72399 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72400 {
72401- if (lapb->callbacks.connect_indication)
72402- lapb->callbacks.connect_indication(lapb->dev, reason);
72403+ if (lapb->callbacks->connect_indication)
72404+ lapb->callbacks->connect_indication(lapb->dev, reason);
72405 }
72406
72407 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72408 {
72409- if (lapb->callbacks.disconnect_confirmation)
72410- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72411+ if (lapb->callbacks->disconnect_confirmation)
72412+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72413 }
72414
72415 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72416 {
72417- if (lapb->callbacks.disconnect_indication)
72418- lapb->callbacks.disconnect_indication(lapb->dev, reason);
72419+ if (lapb->callbacks->disconnect_indication)
72420+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
72421 }
72422
72423 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72424 {
72425- if (lapb->callbacks.data_indication)
72426- return lapb->callbacks.data_indication(lapb->dev, skb);
72427+ if (lapb->callbacks->data_indication)
72428+ return lapb->callbacks->data_indication(lapb->dev, skb);
72429
72430 kfree_skb(skb);
72431 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72432@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72433 {
72434 int used = 0;
72435
72436- if (lapb->callbacks.data_transmit) {
72437- lapb->callbacks.data_transmit(lapb->dev, skb);
72438+ if (lapb->callbacks->data_transmit) {
72439+ lapb->callbacks->data_transmit(lapb->dev, skb);
72440 used = 1;
72441 }
72442
72443diff -urNp linux-2.6.32.46/net/mac80211/cfg.c linux-2.6.32.46/net/mac80211/cfg.c
72444--- linux-2.6.32.46/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72445+++ linux-2.6.32.46/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72446@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72447 return err;
72448 }
72449
72450-struct cfg80211_ops mac80211_config_ops = {
72451+const struct cfg80211_ops mac80211_config_ops = {
72452 .add_virtual_intf = ieee80211_add_iface,
72453 .del_virtual_intf = ieee80211_del_iface,
72454 .change_virtual_intf = ieee80211_change_iface,
72455diff -urNp linux-2.6.32.46/net/mac80211/cfg.h linux-2.6.32.46/net/mac80211/cfg.h
72456--- linux-2.6.32.46/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72457+++ linux-2.6.32.46/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72458@@ -4,6 +4,6 @@
72459 #ifndef __CFG_H
72460 #define __CFG_H
72461
72462-extern struct cfg80211_ops mac80211_config_ops;
72463+extern const struct cfg80211_ops mac80211_config_ops;
72464
72465 #endif /* __CFG_H */
72466diff -urNp linux-2.6.32.46/net/mac80211/debugfs_key.c linux-2.6.32.46/net/mac80211/debugfs_key.c
72467--- linux-2.6.32.46/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72468+++ linux-2.6.32.46/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72469@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72470 size_t count, loff_t *ppos)
72471 {
72472 struct ieee80211_key *key = file->private_data;
72473- int i, res, bufsize = 2 * key->conf.keylen + 2;
72474+ int i, bufsize = 2 * key->conf.keylen + 2;
72475 char *buf = kmalloc(bufsize, GFP_KERNEL);
72476 char *p = buf;
72477+ ssize_t res;
72478+
72479+ if (buf == NULL)
72480+ return -ENOMEM;
72481
72482 for (i = 0; i < key->conf.keylen; i++)
72483 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72484diff -urNp linux-2.6.32.46/net/mac80211/debugfs_sta.c linux-2.6.32.46/net/mac80211/debugfs_sta.c
72485--- linux-2.6.32.46/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72486+++ linux-2.6.32.46/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72487@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72488 int i;
72489 struct sta_info *sta = file->private_data;
72490
72491+ pax_track_stack();
72492+
72493 spin_lock_bh(&sta->lock);
72494 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72495 sta->ampdu_mlme.dialog_token_allocator + 1);
72496diff -urNp linux-2.6.32.46/net/mac80211/ieee80211_i.h linux-2.6.32.46/net/mac80211/ieee80211_i.h
72497--- linux-2.6.32.46/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72498+++ linux-2.6.32.46/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72499@@ -25,6 +25,7 @@
72500 #include <linux/etherdevice.h>
72501 #include <net/cfg80211.h>
72502 #include <net/mac80211.h>
72503+#include <asm/local.h>
72504 #include "key.h"
72505 #include "sta_info.h"
72506
72507@@ -635,7 +636,7 @@ struct ieee80211_local {
72508 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72509 spinlock_t queue_stop_reason_lock;
72510
72511- int open_count;
72512+ local_t open_count;
72513 int monitors, cooked_mntrs;
72514 /* number of interfaces with corresponding FIF_ flags */
72515 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72516diff -urNp linux-2.6.32.46/net/mac80211/iface.c linux-2.6.32.46/net/mac80211/iface.c
72517--- linux-2.6.32.46/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72518+++ linux-2.6.32.46/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72519@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72520 break;
72521 }
72522
72523- if (local->open_count == 0) {
72524+ if (local_read(&local->open_count) == 0) {
72525 res = drv_start(local);
72526 if (res)
72527 goto err_del_bss;
72528@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72529 * Validate the MAC address for this device.
72530 */
72531 if (!is_valid_ether_addr(dev->dev_addr)) {
72532- if (!local->open_count)
72533+ if (!local_read(&local->open_count))
72534 drv_stop(local);
72535 return -EADDRNOTAVAIL;
72536 }
72537@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72538
72539 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72540
72541- local->open_count++;
72542+ local_inc(&local->open_count);
72543 if (hw_reconf_flags) {
72544 ieee80211_hw_config(local, hw_reconf_flags);
72545 /*
72546@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72547 err_del_interface:
72548 drv_remove_interface(local, &conf);
72549 err_stop:
72550- if (!local->open_count)
72551+ if (!local_read(&local->open_count))
72552 drv_stop(local);
72553 err_del_bss:
72554 sdata->bss = NULL;
72555@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
72556 WARN_ON(!list_empty(&sdata->u.ap.vlans));
72557 }
72558
72559- local->open_count--;
72560+ local_dec(&local->open_count);
72561
72562 switch (sdata->vif.type) {
72563 case NL80211_IFTYPE_AP_VLAN:
72564@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
72565
72566 ieee80211_recalc_ps(local, -1);
72567
72568- if (local->open_count == 0) {
72569+ if (local_read(&local->open_count) == 0) {
72570 ieee80211_clear_tx_pending(local);
72571 ieee80211_stop_device(local);
72572
72573diff -urNp linux-2.6.32.46/net/mac80211/main.c linux-2.6.32.46/net/mac80211/main.c
72574--- linux-2.6.32.46/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
72575+++ linux-2.6.32.46/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
72576@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
72577 local->hw.conf.power_level = power;
72578 }
72579
72580- if (changed && local->open_count) {
72581+ if (changed && local_read(&local->open_count)) {
72582 ret = drv_config(local, changed);
72583 /*
72584 * Goal:
72585diff -urNp linux-2.6.32.46/net/mac80211/mlme.c linux-2.6.32.46/net/mac80211/mlme.c
72586--- linux-2.6.32.46/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
72587+++ linux-2.6.32.46/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
72588@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
72589 bool have_higher_than_11mbit = false, newsta = false;
72590 u16 ap_ht_cap_flags;
72591
72592+ pax_track_stack();
72593+
72594 /*
72595 * AssocResp and ReassocResp have identical structure, so process both
72596 * of them in this function.
72597diff -urNp linux-2.6.32.46/net/mac80211/pm.c linux-2.6.32.46/net/mac80211/pm.c
72598--- linux-2.6.32.46/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
72599+++ linux-2.6.32.46/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
72600@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
72601 }
72602
72603 /* stop hardware - this must stop RX */
72604- if (local->open_count)
72605+ if (local_read(&local->open_count))
72606 ieee80211_stop_device(local);
72607
72608 local->suspended = true;
72609diff -urNp linux-2.6.32.46/net/mac80211/rate.c linux-2.6.32.46/net/mac80211/rate.c
72610--- linux-2.6.32.46/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
72611+++ linux-2.6.32.46/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
72612@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
72613 struct rate_control_ref *ref, *old;
72614
72615 ASSERT_RTNL();
72616- if (local->open_count)
72617+ if (local_read(&local->open_count))
72618 return -EBUSY;
72619
72620 ref = rate_control_alloc(name, local);
72621diff -urNp linux-2.6.32.46/net/mac80211/tx.c linux-2.6.32.46/net/mac80211/tx.c
72622--- linux-2.6.32.46/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
72623+++ linux-2.6.32.46/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
72624@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
72625 return cpu_to_le16(dur);
72626 }
72627
72628-static int inline is_ieee80211_device(struct ieee80211_local *local,
72629+static inline int is_ieee80211_device(struct ieee80211_local *local,
72630 struct net_device *dev)
72631 {
72632 return local == wdev_priv(dev->ieee80211_ptr);
72633diff -urNp linux-2.6.32.46/net/mac80211/util.c linux-2.6.32.46/net/mac80211/util.c
72634--- linux-2.6.32.46/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
72635+++ linux-2.6.32.46/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
72636@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
72637 local->resuming = true;
72638
72639 /* restart hardware */
72640- if (local->open_count) {
72641+ if (local_read(&local->open_count)) {
72642 /*
72643 * Upon resume hardware can sometimes be goofy due to
72644 * various platform / driver / bus issues, so restarting
72645diff -urNp linux-2.6.32.46/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.46/net/netfilter/ipvs/ip_vs_app.c
72646--- linux-2.6.32.46/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
72647+++ linux-2.6.32.46/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
72648@@ -564,7 +564,7 @@ static const struct file_operations ip_v
72649 .open = ip_vs_app_open,
72650 .read = seq_read,
72651 .llseek = seq_lseek,
72652- .release = seq_release,
72653+ .release = seq_release_net,
72654 };
72655 #endif
72656
72657diff -urNp linux-2.6.32.46/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.46/net/netfilter/ipvs/ip_vs_conn.c
72658--- linux-2.6.32.46/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
72659+++ linux-2.6.32.46/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
72660@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
72661 /* if the connection is not template and is created
72662 * by sync, preserve the activity flag.
72663 */
72664- cp->flags |= atomic_read(&dest->conn_flags) &
72665+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
72666 (~IP_VS_CONN_F_INACTIVE);
72667 else
72668- cp->flags |= atomic_read(&dest->conn_flags);
72669+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
72670 cp->dest = dest;
72671
72672 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
72673@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
72674 atomic_set(&cp->refcnt, 1);
72675
72676 atomic_set(&cp->n_control, 0);
72677- atomic_set(&cp->in_pkts, 0);
72678+ atomic_set_unchecked(&cp->in_pkts, 0);
72679
72680 atomic_inc(&ip_vs_conn_count);
72681 if (flags & IP_VS_CONN_F_NO_CPORT)
72682@@ -871,7 +871,7 @@ static const struct file_operations ip_v
72683 .open = ip_vs_conn_open,
72684 .read = seq_read,
72685 .llseek = seq_lseek,
72686- .release = seq_release,
72687+ .release = seq_release_net,
72688 };
72689
72690 static const char *ip_vs_origin_name(unsigned flags)
72691@@ -934,7 +934,7 @@ static const struct file_operations ip_v
72692 .open = ip_vs_conn_sync_open,
72693 .read = seq_read,
72694 .llseek = seq_lseek,
72695- .release = seq_release,
72696+ .release = seq_release_net,
72697 };
72698
72699 #endif
72700@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
72701
72702 /* Don't drop the entry if its number of incoming packets is not
72703 located in [0, 8] */
72704- i = atomic_read(&cp->in_pkts);
72705+ i = atomic_read_unchecked(&cp->in_pkts);
72706 if (i > 8 || i < 0) return 0;
72707
72708 if (!todrop_rate[i]) return 0;
72709diff -urNp linux-2.6.32.46/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.46/net/netfilter/ipvs/ip_vs_core.c
72710--- linux-2.6.32.46/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
72711+++ linux-2.6.32.46/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
72712@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
72713 ret = cp->packet_xmit(skb, cp, pp);
72714 /* do not touch skb anymore */
72715
72716- atomic_inc(&cp->in_pkts);
72717+ atomic_inc_unchecked(&cp->in_pkts);
72718 ip_vs_conn_put(cp);
72719 return ret;
72720 }
72721@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
72722 * Sync connection if it is about to close to
72723 * encorage the standby servers to update the connections timeout
72724 */
72725- pkts = atomic_add_return(1, &cp->in_pkts);
72726+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
72727 if (af == AF_INET &&
72728 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
72729 (((cp->protocol != IPPROTO_TCP ||
72730diff -urNp linux-2.6.32.46/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.46/net/netfilter/ipvs/ip_vs_ctl.c
72731--- linux-2.6.32.46/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
72732+++ linux-2.6.32.46/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
72733@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
72734 ip_vs_rs_hash(dest);
72735 write_unlock_bh(&__ip_vs_rs_lock);
72736 }
72737- atomic_set(&dest->conn_flags, conn_flags);
72738+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
72739
72740 /* bind the service */
72741 if (!dest->svc) {
72742@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
72743 " %-7s %-6d %-10d %-10d\n",
72744 &dest->addr.in6,
72745 ntohs(dest->port),
72746- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72747+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72748 atomic_read(&dest->weight),
72749 atomic_read(&dest->activeconns),
72750 atomic_read(&dest->inactconns));
72751@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
72752 "%-7s %-6d %-10d %-10d\n",
72753 ntohl(dest->addr.ip),
72754 ntohs(dest->port),
72755- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
72756+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
72757 atomic_read(&dest->weight),
72758 atomic_read(&dest->activeconns),
72759 atomic_read(&dest->inactconns));
72760@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
72761 .open = ip_vs_info_open,
72762 .read = seq_read,
72763 .llseek = seq_lseek,
72764- .release = seq_release_private,
72765+ .release = seq_release_net,
72766 };
72767
72768 #endif
72769@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
72770 .open = ip_vs_stats_seq_open,
72771 .read = seq_read,
72772 .llseek = seq_lseek,
72773- .release = single_release,
72774+ .release = single_release_net,
72775 };
72776
72777 #endif
72778@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
72779
72780 entry.addr = dest->addr.ip;
72781 entry.port = dest->port;
72782- entry.conn_flags = atomic_read(&dest->conn_flags);
72783+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
72784 entry.weight = atomic_read(&dest->weight);
72785 entry.u_threshold = dest->u_threshold;
72786 entry.l_threshold = dest->l_threshold;
72787@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
72788 unsigned char arg[128];
72789 int ret = 0;
72790
72791+ pax_track_stack();
72792+
72793 if (!capable(CAP_NET_ADMIN))
72794 return -EPERM;
72795
72796@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
72797 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
72798
72799 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
72800- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72801+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
72802 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
72803 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
72804 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
72805diff -urNp linux-2.6.32.46/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.46/net/netfilter/ipvs/ip_vs_sync.c
72806--- linux-2.6.32.46/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
72807+++ linux-2.6.32.46/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
72808@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
72809
72810 if (opt)
72811 memcpy(&cp->in_seq, opt, sizeof(*opt));
72812- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72813+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
72814 cp->state = state;
72815 cp->old_state = cp->state;
72816 /*
72817diff -urNp linux-2.6.32.46/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.46/net/netfilter/ipvs/ip_vs_xmit.c
72818--- linux-2.6.32.46/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
72819+++ linux-2.6.32.46/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
72820@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
72821 else
72822 rc = NF_ACCEPT;
72823 /* do not touch skb anymore */
72824- atomic_inc(&cp->in_pkts);
72825+ atomic_inc_unchecked(&cp->in_pkts);
72826 goto out;
72827 }
72828
72829@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
72830 else
72831 rc = NF_ACCEPT;
72832 /* do not touch skb anymore */
72833- atomic_inc(&cp->in_pkts);
72834+ atomic_inc_unchecked(&cp->in_pkts);
72835 goto out;
72836 }
72837
72838diff -urNp linux-2.6.32.46/net/netfilter/Kconfig linux-2.6.32.46/net/netfilter/Kconfig
72839--- linux-2.6.32.46/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
72840+++ linux-2.6.32.46/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
72841@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
72842
72843 To compile it as a module, choose M here. If unsure, say N.
72844
72845+config NETFILTER_XT_MATCH_GRADM
72846+ tristate '"gradm" match support'
72847+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
72848+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
72849+ ---help---
72850+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
72851+ It is useful when iptables rules are applied early on bootup to
72852+ prevent connections to the machine (except from a trusted host)
72853+ while the RBAC system is disabled.
72854+
72855 config NETFILTER_XT_MATCH_HASHLIMIT
72856 tristate '"hashlimit" match support'
72857 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
72858diff -urNp linux-2.6.32.46/net/netfilter/Makefile linux-2.6.32.46/net/netfilter/Makefile
72859--- linux-2.6.32.46/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
72860+++ linux-2.6.32.46/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
72861@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
72862 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72863 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
72864 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
72865+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
72866 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
72867 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
72868 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
72869diff -urNp linux-2.6.32.46/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.46/net/netfilter/nf_conntrack_netlink.c
72870--- linux-2.6.32.46/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
72871+++ linux-2.6.32.46/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
72872@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
72873 static int
72874 ctnetlink_parse_tuple(const struct nlattr * const cda[],
72875 struct nf_conntrack_tuple *tuple,
72876- enum ctattr_tuple type, u_int8_t l3num)
72877+ enum ctattr_type type, u_int8_t l3num)
72878 {
72879 struct nlattr *tb[CTA_TUPLE_MAX+1];
72880 int err;
72881diff -urNp linux-2.6.32.46/net/netfilter/nfnetlink_log.c linux-2.6.32.46/net/netfilter/nfnetlink_log.c
72882--- linux-2.6.32.46/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
72883+++ linux-2.6.32.46/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
72884@@ -68,7 +68,7 @@ struct nfulnl_instance {
72885 };
72886
72887 static DEFINE_RWLOCK(instances_lock);
72888-static atomic_t global_seq;
72889+static atomic_unchecked_t global_seq;
72890
72891 #define INSTANCE_BUCKETS 16
72892 static struct hlist_head instance_table[INSTANCE_BUCKETS];
72893@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
72894 /* global sequence number */
72895 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
72896 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
72897- htonl(atomic_inc_return(&global_seq)));
72898+ htonl(atomic_inc_return_unchecked(&global_seq)));
72899
72900 if (data_len) {
72901 struct nlattr *nla;
72902diff -urNp linux-2.6.32.46/net/netfilter/xt_gradm.c linux-2.6.32.46/net/netfilter/xt_gradm.c
72903--- linux-2.6.32.46/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
72904+++ linux-2.6.32.46/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
72905@@ -0,0 +1,51 @@
72906+/*
72907+ * gradm match for netfilter
72908