grsecurity-2.2.2-2.6.32.45-201108211939.patch
1diff -urNp linux-2.6.32.45/arch/alpha/include/asm/elf.h linux-2.6.32.45/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.45/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.32.45/arch/alpha/include/asm/pgtable.h linux-2.6.32.45/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.45/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
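The _NOEXEC protections above differ from the base ones only by _PAGE_FOE (fault on execute): an instruction fetch from such a page traps, which is what lets the PAGEEXEC fault handler added to arch/alpha/mm/fault.c further down detect and act on execution attempts. With PAX_PAGEEXEC disabled, the _NOEXEC names simply alias the normal protections, so callers need no #ifdefs.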
39diff -urNp linux-2.6.32.45/arch/alpha/kernel/module.c linux-2.6.32.45/arch/alpha/kernel/module.c
40--- linux-2.6.32.45/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.45/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.45/arch/alpha/kernel/osf_sys.c linux-2.6.32.45/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-08-09 18:35:28.000000000 -0400
53+++ linux-2.6.32.45/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.32.45/arch/alpha/mm/fault.c linux-2.6.32.45/arch/alpha/mm/fault.c
86--- linux-2.6.32.45/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87+++ linux-2.6.32.45/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.32.45/arch/arm/include/asm/elf.h linux-2.6.32.45/arch/arm/include/asm/elf.h
245--- linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246+++ linux-2.6.32.45/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
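In the hunk above, PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are entropy bit counts for page-granular base randomization. Assuming 4 KiB pages (PAGE_SHIFT = 12), the 16-bit PER_LINUX_32BIT case lets the mmap and stack bases move within a 2^16 * 4 KiB = 256 MiB window, while the 10-bit case covers only 2^10 * 4 KiB = 4 MiB.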
263diff -urNp linux-2.6.32.45/arch/arm/include/asm/kmap_types.h linux-2.6.32.45/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265+++ linux-2.6.32.45/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.45/arch/arm/include/asm/uaccess.h linux-2.6.32.45/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276+++ linux-2.6.32.45/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
277@@ -22,6 +22,8 @@
278 #define VERIFY_READ 0
279 #define VERIFY_WRITE 1
280
281+extern void check_object_size(const void *ptr, unsigned long n, bool to);
282+
283 /*
284 * The exception table consists of pairs of addresses: the first is the
285 * address of an instruction that is allowed to fault, and the second is
286@@ -387,8 +389,23 @@ do { \
287
288
289 #ifdef CONFIG_MMU
290-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
291-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
292+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
293+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
294+
295+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
296+{
297+ if (!__builtin_constant_p(n))
298+ check_object_size(to, n, false);
299+ return ___copy_from_user(to, from, n);
300+}
301+
302+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
303+{
304+ if (!__builtin_constant_p(n))
305+ check_object_size(from, n, true);
306+ return ___copy_to_user(to, from, n);
307+}
308+
309 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
310 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
311 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
312@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
313
314 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
315 {
316+ if ((long)n < 0)
317+ return n;
318+
319 if (access_ok(VERIFY_READ, from, n))
320 n = __copy_from_user(to, from, n);
321 else /* security hole - plug it */
322@@ -412,6 +432,9 @@ static inline unsigned long __must_check
323
324 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
325 {
326+ if ((long)n < 0)
327+ return n;
328+
329 if (access_ok(VERIFY_WRITE, to, n))
330 n = __copy_to_user(to, from, n);
331 return n;
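The uaccess.h hunk above routes __copy_{from,to}_user through wrappers that call check_object_size() whenever the length is not a compile-time constant, and makes copy_{from,to}_user refuse lengths with the sign bit set. A minimal, userspace-compilable sketch of that pattern follows; raw_copy() and hardened_copy() are hypothetical stand-ins, and check_object_size() is only stubbed here (in the patch it is supplied by the PAX_USERCOPY object checking elsewhere in the tree).

#include <string.h>

static void check_object_size(const void *ptr, unsigned long n, int to_user)
{
	/* stub: the real helper verifies that ptr..ptr+n stays inside one heap or stack object */
	(void)ptr; (void)n; (void)to_user;
}

static unsigned long raw_copy(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);	/* stand-in for the real ___copy_{from,to}_user */
	return 0;		/* 0 bytes left uncopied */
}

unsigned long hardened_copy(void *to, const void *from, unsigned long n, int to_user)
{
	if ((long)n < 0)	/* length with the sign bit set: refuse, report everything uncopied */
		return n;
	if (!__builtin_constant_p(n))
		check_object_size(to_user ? from : to, n, to_user);
	return raw_copy(to, from, n);
}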
332diff -urNp linux-2.6.32.45/arch/arm/kernel/armksyms.c linux-2.6.32.45/arch/arm/kernel/armksyms.c
333--- linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
334+++ linux-2.6.32.45/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
335@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
336 #ifdef CONFIG_MMU
337 EXPORT_SYMBOL(copy_page);
338
339-EXPORT_SYMBOL(__copy_from_user);
340-EXPORT_SYMBOL(__copy_to_user);
341+EXPORT_SYMBOL(___copy_from_user);
342+EXPORT_SYMBOL(___copy_to_user);
343 EXPORT_SYMBOL(__clear_user);
344
345 EXPORT_SYMBOL(__get_user_1);
346diff -urNp linux-2.6.32.45/arch/arm/kernel/kgdb.c linux-2.6.32.45/arch/arm/kernel/kgdb.c
347--- linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
348+++ linux-2.6.32.45/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
349@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
350 * and we handle the normal undef case within the do_undefinstr
351 * handler.
352 */
353-struct kgdb_arch arch_kgdb_ops = {
354+const struct kgdb_arch arch_kgdb_ops = {
355 #ifndef __ARMEB__
356 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
357 #else /* ! __ARMEB__ */
358diff -urNp linux-2.6.32.45/arch/arm/kernel/traps.c linux-2.6.32.45/arch/arm/kernel/traps.c
359--- linux-2.6.32.45/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
360+++ linux-2.6.32.45/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
361@@ -247,6 +247,8 @@ static void __die(const char *str, int e
362
363 DEFINE_SPINLOCK(die_lock);
364
365+extern void gr_handle_kernel_exploit(void);
366+
367 /*
368 * This function is protected against re-entrancy.
369 */
370@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
371 if (panic_on_oops)
372 panic("Fatal exception");
373
374+ gr_handle_kernel_exploit();
375+
376 do_exit(SIGSEGV);
377 }
378
379diff -urNp linux-2.6.32.45/arch/arm/lib/copy_from_user.S linux-2.6.32.45/arch/arm/lib/copy_from_user.S
380--- linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
381+++ linux-2.6.32.45/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
382@@ -16,7 +16,7 @@
383 /*
384 * Prototype:
385 *
386- * size_t __copy_from_user(void *to, const void *from, size_t n)
387+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
388 *
389 * Purpose:
390 *
391@@ -84,11 +84,11 @@
392
393 .text
394
395-ENTRY(__copy_from_user)
396+ENTRY(___copy_from_user)
397
398 #include "copy_template.S"
399
400-ENDPROC(__copy_from_user)
401+ENDPROC(___copy_from_user)
402
403 .section .fixup,"ax"
404 .align 0
405diff -urNp linux-2.6.32.45/arch/arm/lib/copy_to_user.S linux-2.6.32.45/arch/arm/lib/copy_to_user.S
406--- linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
407+++ linux-2.6.32.45/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
408@@ -16,7 +16,7 @@
409 /*
410 * Prototype:
411 *
412- * size_t __copy_to_user(void *to, const void *from, size_t n)
413+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
414 *
415 * Purpose:
416 *
417@@ -88,11 +88,11 @@
418 .text
419
420 ENTRY(__copy_to_user_std)
421-WEAK(__copy_to_user)
422+WEAK(___copy_to_user)
423
424 #include "copy_template.S"
425
426-ENDPROC(__copy_to_user)
427+ENDPROC(___copy_to_user)
428
429 .section .fixup,"ax"
430 .align 0
431diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess.S linux-2.6.32.45/arch/arm/lib/uaccess.S
432--- linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.45/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
434@@ -19,7 +19,7 @@
435
436 #define PAGE_SHIFT 12
437
438-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
439+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
440 * Purpose : copy a block to user memory from kernel memory
441 * Params : to - user memory
442 * : from - kernel memory
443@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
444 sub r2, r2, ip
445 b .Lc2u_dest_aligned
446
447-ENTRY(__copy_to_user)
448+ENTRY(___copy_to_user)
449 stmfd sp!, {r2, r4 - r7, lr}
450 cmp r2, #4
451 blt .Lc2u_not_enough
452@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
453 ldrgtb r3, [r1], #0
454 USER( strgtbt r3, [r0], #1) @ May fault
455 b .Lc2u_finished
456-ENDPROC(__copy_to_user)
457+ENDPROC(___copy_to_user)
458
459 .section .fixup,"ax"
460 .align 0
461 9001: ldmfd sp!, {r0, r4 - r7, pc}
462 .previous
463
464-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
465+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
466 * Purpose : copy a block from user memory to kernel memory
467 * Params : to - kernel memory
468 * : from - user memory
469@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
470 sub r2, r2, ip
471 b .Lcfu_dest_aligned
472
473-ENTRY(__copy_from_user)
474+ENTRY(___copy_from_user)
475 stmfd sp!, {r0, r2, r4 - r7, lr}
476 cmp r2, #4
477 blt .Lcfu_not_enough
478@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
479 USER( ldrgtbt r3, [r1], #1) @ May fault
480 strgtb r3, [r0], #1
481 b .Lcfu_finished
482-ENDPROC(__copy_from_user)
483+ENDPROC(___copy_from_user)
484
485 .section .fixup,"ax"
486 .align 0
487diff -urNp linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c
488--- linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
489+++ linux-2.6.32.45/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
490@@ -97,7 +97,7 @@ out:
491 }
492
493 unsigned long
494-__copy_to_user(void __user *to, const void *from, unsigned long n)
495+___copy_to_user(void __user *to, const void *from, unsigned long n)
496 {
497 /*
498 * This test is stubbed out of the main function above to keep
499diff -urNp linux-2.6.32.45/arch/arm/mach-at91/pm.c linux-2.6.32.45/arch/arm/mach-at91/pm.c
500--- linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
501+++ linux-2.6.32.45/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
502@@ -348,7 +348,7 @@ static void at91_pm_end(void)
503 }
504
505
506-static struct platform_suspend_ops at91_pm_ops ={
507+static const struct platform_suspend_ops at91_pm_ops ={
508 .valid = at91_pm_valid_state,
509 .begin = at91_pm_begin,
510 .enter = at91_pm_enter,
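This hunk, and the analogous platform_suspend_ops, dma_map_ops, kgdb_arch and sysfs_ops hunks later in the patch, apply the same constification idea: an ops table that is only ever read should be const so it lands in .rodata and its function pointers cannot be overwritten at run time. A tiny illustration with a hypothetical ops type:

/* Illustrative only; example_ops is a hypothetical type.  A const-qualified ops
 * table is emitted into .rodata, so its function pointers cannot be rewritten
 * at run time, and any accidental store to it fails to compile. */
struct example_ops {
	int (*valid)(int state);
	int (*enter)(int state);
};

static int ex_valid(int state) { (void)state; return 1; }
static int ex_enter(int state) { (void)state; return 0; }

static const struct example_ops ex_pm_ops = {
	.valid	= ex_valid,
	.enter	= ex_enter,
};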
511diff -urNp linux-2.6.32.45/arch/arm/mach-omap1/pm.c linux-2.6.32.45/arch/arm/mach-omap1/pm.c
512--- linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
513+++ linux-2.6.32.45/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
514@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
515
516
517
518-static struct platform_suspend_ops omap_pm_ops ={
519+static const struct platform_suspend_ops omap_pm_ops ={
520 .prepare = omap_pm_prepare,
521 .enter = omap_pm_enter,
522 .finish = omap_pm_finish,
523diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c
524--- linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
525+++ linux-2.6.32.45/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
526@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
527 enable_hlt();
528 }
529
530-static struct platform_suspend_ops omap_pm_ops = {
531+static const struct platform_suspend_ops omap_pm_ops = {
532 .prepare = omap2_pm_prepare,
533 .enter = omap2_pm_enter,
534 .finish = omap2_pm_finish,
535diff -urNp linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c
536--- linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
537+++ linux-2.6.32.45/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
538@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
539 return;
540 }
541
542-static struct platform_suspend_ops omap_pm_ops = {
543+static const struct platform_suspend_ops omap_pm_ops = {
544 .begin = omap3_pm_begin,
545 .end = omap3_pm_end,
546 .prepare = omap3_pm_prepare,
547diff -urNp linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c
548--- linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
549+++ linux-2.6.32.45/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
550@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
551 (state == PM_SUSPEND_MEM);
552 }
553
554-static struct platform_suspend_ops pnx4008_pm_ops = {
555+static const struct platform_suspend_ops pnx4008_pm_ops = {
556 .enter = pnx4008_pm_enter,
557 .valid = pnx4008_pm_valid,
558 };
559diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/pm.c linux-2.6.32.45/arch/arm/mach-pxa/pm.c
560--- linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
561+++ linux-2.6.32.45/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
562@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
563 pxa_cpu_pm_fns->finish();
564 }
565
566-static struct platform_suspend_ops pxa_pm_ops = {
567+static const struct platform_suspend_ops pxa_pm_ops = {
568 .valid = pxa_pm_valid,
569 .enter = pxa_pm_enter,
570 .prepare = pxa_pm_prepare,
571diff -urNp linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c
572--- linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
573+++ linux-2.6.32.45/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
574@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
575 }
576
577 #ifdef CONFIG_PM
578-static struct platform_suspend_ops sharpsl_pm_ops = {
579+static const struct platform_suspend_ops sharpsl_pm_ops = {
580 .prepare = pxa_pm_prepare,
581 .finish = pxa_pm_finish,
582 .enter = corgi_pxa_pm_enter,
583diff -urNp linux-2.6.32.45/arch/arm/mach-sa1100/pm.c linux-2.6.32.45/arch/arm/mach-sa1100/pm.c
584--- linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
585+++ linux-2.6.32.45/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
586@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
587 return virt_to_phys(sp);
588 }
589
590-static struct platform_suspend_ops sa11x0_pm_ops = {
591+static const struct platform_suspend_ops sa11x0_pm_ops = {
592 .enter = sa11x0_pm_enter,
593 .valid = suspend_valid_only_mem,
594 };
595diff -urNp linux-2.6.32.45/arch/arm/mm/fault.c linux-2.6.32.45/arch/arm/mm/fault.c
596--- linux-2.6.32.45/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
597+++ linux-2.6.32.45/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
598@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
599 }
600 #endif
601
602+#ifdef CONFIG_PAX_PAGEEXEC
603+ if (fsr & FSR_LNX_PF) {
604+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
605+ do_group_exit(SIGKILL);
606+ }
607+#endif
608+
609 tsk->thread.address = addr;
610 tsk->thread.error_code = fsr;
611 tsk->thread.trap_no = 14;
612@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
613 }
614 #endif /* CONFIG_MMU */
615
616+#ifdef CONFIG_PAX_PAGEEXEC
617+void pax_report_insns(void *pc, void *sp)
618+{
619+ long i;
620+
621+ printk(KERN_ERR "PAX: bytes at PC: ");
622+ for (i = 0; i < 20; i++) {
623+ unsigned char c;
624+ if (get_user(c, (__force unsigned char __user *)pc+i))
625+ printk(KERN_CONT "?? ");
626+ else
627+ printk(KERN_CONT "%02x ", c);
628+ }
629+ printk("\n");
630+
631+ printk(KERN_ERR "PAX: bytes at SP-4: ");
632+ for (i = -1; i < 20; i++) {
633+ unsigned long c;
634+ if (get_user(c, (__force unsigned long __user *)sp+i))
635+ printk(KERN_CONT "???????? ");
636+ else
637+ printk(KERN_CONT "%08lx ", c);
638+ }
639+ printk("\n");
640+}
641+#endif
642+
643 /*
644 * First Level Translation Fault Handler
645 *
646diff -urNp linux-2.6.32.45/arch/arm/mm/mmap.c linux-2.6.32.45/arch/arm/mm/mmap.c
647--- linux-2.6.32.45/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
648+++ linux-2.6.32.45/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
649@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
650 if (len > TASK_SIZE)
651 return -ENOMEM;
652
653+#ifdef CONFIG_PAX_RANDMMAP
654+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
655+#endif
656+
657 if (addr) {
658 if (do_align)
659 addr = COLOUR_ALIGN(addr, pgoff);
660@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
661 addr = PAGE_ALIGN(addr);
662
663 vma = find_vma(mm, addr);
664- if (TASK_SIZE - len >= addr &&
665- (!vma || addr + len <= vma->vm_start))
666+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
667 return addr;
668 }
669 if (len > mm->cached_hole_size) {
670- start_addr = addr = mm->free_area_cache;
671+ start_addr = addr = mm->free_area_cache;
672 } else {
673- start_addr = addr = TASK_UNMAPPED_BASE;
674- mm->cached_hole_size = 0;
675+ start_addr = addr = mm->mmap_base;
676+ mm->cached_hole_size = 0;
677 }
678
679 full_search:
680@@ -94,14 +97,14 @@ full_search:
681 * Start a new search - just in case we missed
682 * some holes.
683 */
684- if (start_addr != TASK_UNMAPPED_BASE) {
685- start_addr = addr = TASK_UNMAPPED_BASE;
686+ if (start_addr != mm->mmap_base) {
687+ start_addr = addr = mm->mmap_base;
688 mm->cached_hole_size = 0;
689 goto full_search;
690 }
691 return -ENOMEM;
692 }
693- if (!vma || addr + len <= vma->vm_start) {
694+ if (check_heap_stack_gap(vma, addr, len)) {
695 /*
696 * Remember the place where we stopped the search:
697 */
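check_heap_stack_gap() replaces the open-coded "!vma || addr + len <= vma->vm_start" test here and in the other arch_get_unmapped_area hunks; its definition is not part of this section. A rough sketch of the intended semantics, under a different name and with a fixed guard size assumed for illustration (the real helper uses a tunable and may differ):

#include <linux/mm.h>

/*
 * Rough sketch only -- the real helper is defined elsewhere in the patch and
 * may differ.  Idea: the candidate range [addr, addr+len) must end below the
 * next mapping, and if that mapping is a downward-growing stack it must also
 * leave a guard gap so heap and stack cannot grow into each other.
 */
static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
				       unsigned long addr, unsigned long len)
{
	const unsigned long gap = 64UL << 10;	/* hypothetical fixed 64 KiB guard */

	if (!vma)				/* no mapping above: always fits */
		return 1;
	if (addr + len > vma->vm_start)		/* would overlap the next mapping */
		return 0;
	if (vma->vm_flags & VM_GROWSDOWN)	/* next mapping is a stack */
		return addr + len + gap <= vma->vm_start;
	return 1;
}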
698diff -urNp linux-2.6.32.45/arch/arm/plat-s3c/pm.c linux-2.6.32.45/arch/arm/plat-s3c/pm.c
699--- linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.45/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
701@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
702 s3c_pm_check_cleanup();
703 }
704
705-static struct platform_suspend_ops s3c_pm_ops = {
706+static const struct platform_suspend_ops s3c_pm_ops = {
707 .enter = s3c_pm_enter,
708 .prepare = s3c_pm_prepare,
709 .finish = s3c_pm_finish,
710diff -urNp linux-2.6.32.45/arch/avr32/include/asm/elf.h linux-2.6.32.45/arch/avr32/include/asm/elf.h
711--- linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
712+++ linux-2.6.32.45/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
713@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
722+
723+#define PAX_DELTA_MMAP_LEN 15
724+#define PAX_DELTA_STACK_LEN 15
725+#endif
726
727 /* This yields a mask that user programs can use to figure out what
728 instruction set this CPU supports. This could be done in user space,
729diff -urNp linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h
730--- linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
731+++ linux-2.6.32.45/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
732@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
733 D(11) KM_IRQ1,
734 D(12) KM_SOFTIRQ0,
735 D(13) KM_SOFTIRQ1,
736-D(14) KM_TYPE_NR
737+D(14) KM_CLEARPAGE,
738+D(15) KM_TYPE_NR
739 };
740
741 #undef D
742diff -urNp linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c
743--- linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
744+++ linux-2.6.32.45/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
745@@ -176,7 +176,7 @@ out:
746 return 0;
747 }
748
749-static struct platform_suspend_ops avr32_pm_ops = {
750+static const struct platform_suspend_ops avr32_pm_ops = {
751 .valid = avr32_pm_valid_state,
752 .enter = avr32_pm_enter,
753 };
754diff -urNp linux-2.6.32.45/arch/avr32/mm/fault.c linux-2.6.32.45/arch/avr32/mm/fault.c
755--- linux-2.6.32.45/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
756+++ linux-2.6.32.45/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
757@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
758
759 int exception_trace = 1;
760
761+#ifdef CONFIG_PAX_PAGEEXEC
762+void pax_report_insns(void *pc, void *sp)
763+{
764+ unsigned long i;
765+
766+ printk(KERN_ERR "PAX: bytes at PC: ");
767+ for (i = 0; i < 20; i++) {
768+ unsigned char c;
769+ if (get_user(c, (unsigned char *)pc+i))
770+ printk(KERN_CONT "???????? ");
771+ else
772+ printk(KERN_CONT "%02x ", c);
773+ }
774+ printk("\n");
775+}
776+#endif
777+
778 /*
779 * This routine handles page faults. It determines the address and the
780 * problem, and then passes it off to one of the appropriate routines.
781@@ -157,6 +174,16 @@ bad_area:
782 up_read(&mm->mmap_sem);
783
784 if (user_mode(regs)) {
785+
786+#ifdef CONFIG_PAX_PAGEEXEC
787+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
788+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
789+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
790+ do_group_exit(SIGKILL);
791+ }
792+ }
793+#endif
794+
795 if (exception_trace && printk_ratelimit())
796 printk("%s%s[%d]: segfault at %08lx pc %08lx "
797 "sp %08lx ecr %lu\n",
798diff -urNp linux-2.6.32.45/arch/blackfin/kernel/kgdb.c linux-2.6.32.45/arch/blackfin/kernel/kgdb.c
799--- linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
800+++ linux-2.6.32.45/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
801@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
802 return -1; /* this means that we do not want to exit from the handler */
803 }
804
805-struct kgdb_arch arch_kgdb_ops = {
806+const struct kgdb_arch arch_kgdb_ops = {
807 .gdb_bpt_instr = {0xa1},
808 #ifdef CONFIG_SMP
809 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
810diff -urNp linux-2.6.32.45/arch/blackfin/mach-common/pm.c linux-2.6.32.45/arch/blackfin/mach-common/pm.c
811--- linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
812+++ linux-2.6.32.45/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
813@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
814 return 0;
815 }
816
817-struct platform_suspend_ops bfin_pm_ops = {
818+const struct platform_suspend_ops bfin_pm_ops = {
819 .enter = bfin_pm_enter,
820 .valid = bfin_pm_valid,
821 };
822diff -urNp linux-2.6.32.45/arch/frv/include/asm/kmap_types.h linux-2.6.32.45/arch/frv/include/asm/kmap_types.h
823--- linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
824+++ linux-2.6.32.45/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
825@@ -23,6 +23,7 @@ enum km_type {
826 KM_IRQ1,
827 KM_SOFTIRQ0,
828 KM_SOFTIRQ1,
829+ KM_CLEARPAGE,
830 KM_TYPE_NR
831 };
832
833diff -urNp linux-2.6.32.45/arch/frv/mm/elf-fdpic.c linux-2.6.32.45/arch/frv/mm/elf-fdpic.c
834--- linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
835+++ linux-2.6.32.45/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
836@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
837 if (addr) {
838 addr = PAGE_ALIGN(addr);
839 vma = find_vma(current->mm, addr);
840- if (TASK_SIZE - len >= addr &&
841- (!vma || addr + len <= vma->vm_start))
842+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
843 goto success;
844 }
845
846@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
847 for (; vma; vma = vma->vm_next) {
848 if (addr > limit)
849 break;
850- if (addr + len <= vma->vm_start)
851+ if (check_heap_stack_gap(vma, addr, len))
852 goto success;
853 addr = vma->vm_end;
854 }
855@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
856 for (; vma; vma = vma->vm_next) {
857 if (addr > limit)
858 break;
859- if (addr + len <= vma->vm_start)
860+ if (check_heap_stack_gap(vma, addr, len))
861 goto success;
862 addr = vma->vm_end;
863 }
864diff -urNp linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c
865--- linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
866+++ linux-2.6.32.45/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
867@@ -17,7 +17,7 @@
868 #include <linux/swiotlb.h>
869 #include <asm/machvec.h>
870
871-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
872+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
873
874 /* swiotlb declarations & definitions: */
875 extern int swiotlb_late_init_with_default_size (size_t size);
876@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
877 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
878 }
879
880-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
881+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
882 {
883 if (use_swiotlb(dev))
884 return &swiotlb_dma_ops;
885diff -urNp linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c
886--- linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.45/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
888@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
889 },
890 };
891
892-extern struct dma_map_ops swiotlb_dma_ops;
893+extern const struct dma_map_ops swiotlb_dma_ops;
894
895 static int __init
896 sba_init(void)
897@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
898
899 __setup("sbapagesize=",sba_page_override);
900
901-struct dma_map_ops sba_dma_ops = {
902+const struct dma_map_ops sba_dma_ops = {
903 .alloc_coherent = sba_alloc_coherent,
904 .free_coherent = sba_free_coherent,
905 .map_page = sba_map_page,
906diff -urNp linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c
907--- linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
908+++ linux-2.6.32.45/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
909@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
910
911 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
912
913+#ifdef CONFIG_PAX_ASLR
914+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
915+
916+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
917+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
918+#endif
919+
920 /* Ugly but avoids duplication */
921 #include "../../../fs/binfmt_elf.c"
922
923diff -urNp linux-2.6.32.45/arch/ia64/ia32/ia32priv.h linux-2.6.32.45/arch/ia64/ia32/ia32priv.h
924--- linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
925+++ linux-2.6.32.45/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
926@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
927 #define ELF_DATA ELFDATA2LSB
928 #define ELF_ARCH EM_386
929
930-#define IA32_STACK_TOP IA32_PAGE_OFFSET
931+#ifdef CONFIG_PAX_RANDUSTACK
932+#define __IA32_DELTA_STACK (current->mm->delta_stack)
933+#else
934+#define __IA32_DELTA_STACK 0UL
935+#endif
936+
937+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
938+
939 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
940 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
941
942diff -urNp linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h
943--- linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
944+++ linux-2.6.32.45/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
945@@ -12,7 +12,7 @@
946
947 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
948
949-extern struct dma_map_ops *dma_ops;
950+extern const struct dma_map_ops *dma_ops;
951 extern struct ia64_machine_vector ia64_mv;
952 extern void set_iommu_machvec(void);
953
954@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
955 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
956 dma_addr_t *daddr, gfp_t gfp)
957 {
958- struct dma_map_ops *ops = platform_dma_get_ops(dev);
959+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
960 void *caddr;
961
962 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
963@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
964 static inline void dma_free_coherent(struct device *dev, size_t size,
965 void *caddr, dma_addr_t daddr)
966 {
967- struct dma_map_ops *ops = platform_dma_get_ops(dev);
968+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
969 debug_dma_free_coherent(dev, size, caddr, daddr);
970 ops->free_coherent(dev, size, caddr, daddr);
971 }
972@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
973
974 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
975 {
976- struct dma_map_ops *ops = platform_dma_get_ops(dev);
977+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
978 return ops->mapping_error(dev, daddr);
979 }
980
981 static inline int dma_supported(struct device *dev, u64 mask)
982 {
983- struct dma_map_ops *ops = platform_dma_get_ops(dev);
984+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
985 return ops->dma_supported(dev, mask);
986 }
987
988diff -urNp linux-2.6.32.45/arch/ia64/include/asm/elf.h linux-2.6.32.45/arch/ia64/include/asm/elf.h
989--- linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
990+++ linux-2.6.32.45/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
991@@ -43,6 +43,13 @@
992 */
993 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
994
995+#ifdef CONFIG_PAX_ASLR
996+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
997+
998+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
999+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1000+#endif
1001+
1002 #define PT_IA_64_UNWIND 0x70000001
1003
1004 /* IA-64 relocations: */
1005diff -urNp linux-2.6.32.45/arch/ia64/include/asm/machvec.h linux-2.6.32.45/arch/ia64/include/asm/machvec.h
1006--- linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1007+++ linux-2.6.32.45/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1008@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1009 /* DMA-mapping interface: */
1010 typedef void ia64_mv_dma_init (void);
1011 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1012-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1013+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1014
1015 /*
1016 * WARNING: The legacy I/O space is _architected_. Platforms are
1017@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1018 # endif /* CONFIG_IA64_GENERIC */
1019
1020 extern void swiotlb_dma_init(void);
1021-extern struct dma_map_ops *dma_get_ops(struct device *);
1022+extern const struct dma_map_ops *dma_get_ops(struct device *);
1023
1024 /*
1025 * Define default versions so we can extend machvec for new platforms without having
1026diff -urNp linux-2.6.32.45/arch/ia64/include/asm/pgtable.h linux-2.6.32.45/arch/ia64/include/asm/pgtable.h
1027--- linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1028+++ linux-2.6.32.45/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1029@@ -12,7 +12,7 @@
1030 * David Mosberger-Tang <davidm@hpl.hp.com>
1031 */
1032
1033-
1034+#include <linux/const.h>
1035 #include <asm/mman.h>
1036 #include <asm/page.h>
1037 #include <asm/processor.h>
1038@@ -143,6 +143,17 @@
1039 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1040 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1041 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1042+
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1045+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1046+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1047+#else
1048+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1049+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1050+# define PAGE_COPY_NOEXEC PAGE_COPY
1051+#endif
1052+
1053 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1054 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1055 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1056diff -urNp linux-2.6.32.45/arch/ia64/include/asm/spinlock.h linux-2.6.32.45/arch/ia64/include/asm/spinlock.h
1057--- linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1058+++ linux-2.6.32.45/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1059@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1060 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1061
1062 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1063- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1064+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1065 }
1066
1067 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1068diff -urNp linux-2.6.32.45/arch/ia64/include/asm/uaccess.h linux-2.6.32.45/arch/ia64/include/asm/uaccess.h
1069--- linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1070+++ linux-2.6.32.45/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1071@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1072 const void *__cu_from = (from); \
1073 long __cu_len = (n); \
1074 \
1075- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1076+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1077 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1078 __cu_len; \
1079 })
1080@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1081 long __cu_len = (n); \
1082 \
1083 __chk_user_ptr(__cu_from); \
1084- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1085+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1086 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1087 __cu_len; \
1088 })
1089diff -urNp linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c
1090--- linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1091+++ linux-2.6.32.45/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1092@@ -3,7 +3,7 @@
1093 /* Set this to 1 if there is a HW IOMMU in the system */
1094 int iommu_detected __read_mostly;
1095
1096-struct dma_map_ops *dma_ops;
1097+const struct dma_map_ops *dma_ops;
1098 EXPORT_SYMBOL(dma_ops);
1099
1100 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1101@@ -16,7 +16,7 @@ static int __init dma_init(void)
1102 }
1103 fs_initcall(dma_init);
1104
1105-struct dma_map_ops *dma_get_ops(struct device *dev)
1106+const struct dma_map_ops *dma_get_ops(struct device *dev)
1107 {
1108 return dma_ops;
1109 }
1110diff -urNp linux-2.6.32.45/arch/ia64/kernel/module.c linux-2.6.32.45/arch/ia64/kernel/module.c
1111--- linux-2.6.32.45/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1112+++ linux-2.6.32.45/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1113@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1114 void
1115 module_free (struct module *mod, void *module_region)
1116 {
1117- if (mod && mod->arch.init_unw_table &&
1118- module_region == mod->module_init) {
1119+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1120 unw_remove_unwind_table(mod->arch.init_unw_table);
1121 mod->arch.init_unw_table = NULL;
1122 }
1123@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1124 }
1125
1126 static inline int
1127+in_init_rx (const struct module *mod, uint64_t addr)
1128+{
1129+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1130+}
1131+
1132+static inline int
1133+in_init_rw (const struct module *mod, uint64_t addr)
1134+{
1135+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1136+}
1137+
1138+static inline int
1139 in_init (const struct module *mod, uint64_t addr)
1140 {
1141- return addr - (uint64_t) mod->module_init < mod->init_size;
1142+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1143+}
1144+
1145+static inline int
1146+in_core_rx (const struct module *mod, uint64_t addr)
1147+{
1148+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1149+}
1150+
1151+static inline int
1152+in_core_rw (const struct module *mod, uint64_t addr)
1153+{
1154+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1155 }
1156
1157 static inline int
1158 in_core (const struct module *mod, uint64_t addr)
1159 {
1160- return addr - (uint64_t) mod->module_core < mod->core_size;
1161+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1162 }
1163
1164 static inline int
1165@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1166 break;
1167
1168 case RV_BDREL:
1169- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1170+ if (in_init_rx(mod, val))
1171+ val -= (uint64_t) mod->module_init_rx;
1172+ else if (in_init_rw(mod, val))
1173+ val -= (uint64_t) mod->module_init_rw;
1174+ else if (in_core_rx(mod, val))
1175+ val -= (uint64_t) mod->module_core_rx;
1176+ else if (in_core_rw(mod, val))
1177+ val -= (uint64_t) mod->module_core_rw;
1178 break;
1179
1180 case RV_LTV:
1181@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1182 * addresses have been selected...
1183 */
1184 uint64_t gp;
1185- if (mod->core_size > MAX_LTOFF)
1186+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1187 /*
1188 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1189 * at the end of the module.
1190 */
1191- gp = mod->core_size - MAX_LTOFF / 2;
1192+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1193 else
1194- gp = mod->core_size / 2;
1195- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1196+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1197+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1198 mod->arch.gp = gp;
1199 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1200 }
1201diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-dma.c linux-2.6.32.45/arch/ia64/kernel/pci-dma.c
1202--- linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1203+++ linux-2.6.32.45/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1204@@ -43,7 +43,7 @@ struct device fallback_dev = {
1205 .dma_mask = &fallback_dev.coherent_dma_mask,
1206 };
1207
1208-extern struct dma_map_ops intel_dma_ops;
1209+extern const struct dma_map_ops intel_dma_ops;
1210
1211 static int __init pci_iommu_init(void)
1212 {
1213@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1214 }
1215 EXPORT_SYMBOL(iommu_dma_supported);
1216
1217+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1218+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1219+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1220+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1221+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1222+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1223+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1224+
1225+static const struct dma_map_ops intel_iommu_dma_ops = {
1226+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1227+ .alloc_coherent = intel_alloc_coherent,
1228+ .free_coherent = intel_free_coherent,
1229+ .map_sg = intel_map_sg,
1230+ .unmap_sg = intel_unmap_sg,
1231+ .map_page = intel_map_page,
1232+ .unmap_page = intel_unmap_page,
1233+ .mapping_error = intel_mapping_error,
1234+
1235+ .sync_single_for_cpu = machvec_dma_sync_single,
1236+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1237+ .sync_single_for_device = machvec_dma_sync_single,
1238+ .sync_sg_for_device = machvec_dma_sync_sg,
1239+ .dma_supported = iommu_dma_supported,
1240+};
1241+
1242 void __init pci_iommu_alloc(void)
1243 {
1244- dma_ops = &intel_dma_ops;
1245-
1246- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1247- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1248- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1249- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1250- dma_ops->dma_supported = iommu_dma_supported;
1251+ dma_ops = &intel_iommu_dma_ops;
1252
1253 /*
1254 * The order of these functions is important for
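Because dma_ops is now a pointer to const dma_map_ops (see the dma-mapping.c hunk above), the old sequence of installing intel_dma_ops and then patching its sync_* and dma_supported members at run time can no longer compile; the hunk instead declares a fully initialised intel_iommu_dma_ops, borrowing the Intel IOMMU entry points via the extern declarations, and installs that.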
1255diff -urNp linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c
1256--- linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1257+++ linux-2.6.32.45/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1258@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1259 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1260 }
1261
1262-struct dma_map_ops swiotlb_dma_ops = {
1263+const struct dma_map_ops swiotlb_dma_ops = {
1264 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1265 .free_coherent = swiotlb_free_coherent,
1266 .map_page = swiotlb_map_page,
1267diff -urNp linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c
1268--- linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1269+++ linux-2.6.32.45/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1270@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1271 if (REGION_NUMBER(addr) == RGN_HPAGE)
1272 addr = 0;
1273 #endif
1274+
1275+#ifdef CONFIG_PAX_RANDMMAP
1276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1277+ addr = mm->free_area_cache;
1278+ else
1279+#endif
1280+
1281 if (!addr)
1282 addr = mm->free_area_cache;
1283
1284@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1285 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1286 /* At this point: (!vma || addr < vma->vm_end). */
1287 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1288- if (start_addr != TASK_UNMAPPED_BASE) {
1289+ if (start_addr != mm->mmap_base) {
1290 /* Start a new search --- just in case we missed some holes. */
1291- addr = TASK_UNMAPPED_BASE;
1292+ addr = mm->mmap_base;
1293 goto full_search;
1294 }
1295 return -ENOMEM;
1296 }
1297- if (!vma || addr + len <= vma->vm_start) {
1298+ if (check_heap_stack_gap(vma, addr, len)) {
1299 /* Remember the address where we stopped this search: */
1300 mm->free_area_cache = addr + len;
1301 return addr;
1302diff -urNp linux-2.6.32.45/arch/ia64/kernel/topology.c linux-2.6.32.45/arch/ia64/kernel/topology.c
1303--- linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1304+++ linux-2.6.32.45/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1305@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1306 return ret;
1307 }
1308
1309-static struct sysfs_ops cache_sysfs_ops = {
1310+static const struct sysfs_ops cache_sysfs_ops = {
1311 .show = cache_show
1312 };
1313
1314diff -urNp linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S
1315--- linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1316+++ linux-2.6.32.45/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1317@@ -190,7 +190,7 @@ SECTIONS
1318 /* Per-cpu data: */
1319 . = ALIGN(PERCPU_PAGE_SIZE);
1320 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1321- __phys_per_cpu_start = __per_cpu_load;
1322+ __phys_per_cpu_start = per_cpu_load;
1323 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1324 * into percpu page size
1325 */
1326diff -urNp linux-2.6.32.45/arch/ia64/mm/fault.c linux-2.6.32.45/arch/ia64/mm/fault.c
1327--- linux-2.6.32.45/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1328+++ linux-2.6.32.45/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1329@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1330 return pte_present(pte);
1331 }
1332
1333+#ifdef CONFIG_PAX_PAGEEXEC
1334+void pax_report_insns(void *pc, void *sp)
1335+{
1336+ unsigned long i;
1337+
1338+ printk(KERN_ERR "PAX: bytes at PC: ");
1339+ for (i = 0; i < 8; i++) {
1340+ unsigned int c;
1341+ if (get_user(c, (unsigned int *)pc+i))
1342+ printk(KERN_CONT "???????? ");
1343+ else
1344+ printk(KERN_CONT "%08x ", c);
1345+ }
1346+ printk("\n");
1347+}
1348+#endif
1349+
1350 void __kprobes
1351 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1352 {
1353@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1354 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1355 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1356
1357- if ((vma->vm_flags & mask) != mask)
1358+ if ((vma->vm_flags & mask) != mask) {
1359+
1360+#ifdef CONFIG_PAX_PAGEEXEC
1361+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1362+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1363+ goto bad_area;
1364+
1365+ up_read(&mm->mmap_sem);
1366+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1367+ do_group_exit(SIGKILL);
1368+ }
1369+#endif
1370+
1371 goto bad_area;
1372
1373+ }
1374+
1375 survive:
1376 /*
1377 * If for any reason at all we couldn't handle the fault, make
1378diff -urNp linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c
1379--- linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.45/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1382 /* At this point: (!vmm || addr < vmm->vm_end). */
1383 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1384 return -ENOMEM;
1385- if (!vmm || (addr + len) <= vmm->vm_start)
1386+ if (check_heap_stack_gap(vmm, addr, len))
1387 return addr;
1388 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1389 }
1390diff -urNp linux-2.6.32.45/arch/ia64/mm/init.c linux-2.6.32.45/arch/ia64/mm/init.c
1391--- linux-2.6.32.45/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1392+++ linux-2.6.32.45/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1393@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1394 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1395 vma->vm_end = vma->vm_start + PAGE_SIZE;
1396 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1397+
1398+#ifdef CONFIG_PAX_PAGEEXEC
1399+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1400+ vma->vm_flags &= ~VM_EXEC;
1401+
1402+#ifdef CONFIG_PAX_MPROTECT
1403+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1404+ vma->vm_flags &= ~VM_MAYEXEC;
1405+#endif
1406+
1407+ }
1408+#endif
1409+
1410 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1411 down_write(&current->mm->mmap_sem);
1412 if (insert_vm_struct(current->mm, vma)) {
1413diff -urNp linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c
1414--- linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1415+++ linux-2.6.32.45/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1416@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1417 return ret;
1418 }
1419
1420-static struct dma_map_ops sn_dma_ops = {
1421+static const struct dma_map_ops sn_dma_ops = {
1422 .alloc_coherent = sn_dma_alloc_coherent,
1423 .free_coherent = sn_dma_free_coherent,
1424 .map_page = sn_dma_map_page,
1425diff -urNp linux-2.6.32.45/arch/m32r/lib/usercopy.c linux-2.6.32.45/arch/m32r/lib/usercopy.c
1426--- linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1427+++ linux-2.6.32.45/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1428@@ -14,6 +14,9 @@
1429 unsigned long
1430 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1431 {
1432+ if ((long)n < 0)
1433+ return n;
1434+
1435 prefetch(from);
1436 if (access_ok(VERIFY_WRITE, to, n))
1437 __copy_user(to,from,n);
1438@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1439 unsigned long
1440 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1441 {
1442+ if ((long)n < 0)
1443+ return n;
1444+
1445 prefetchw(to);
1446 if (access_ok(VERIFY_READ, from, n))
1447 __copy_user_zeroing(to,from,n);
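
Illustrative note on the two m32r usercopy hunks above: the added "(long)n < 0" test rejects length values whose sign bit is set -- typically a signed size calculation that went negative and was then implicitly converted to unsigned long -- before access_ok() and the copy loop ever see a near-SIZE_MAX count. Below is a minimal user-space sketch of the same guard; guarded_copy, BUF_SZ and the buffer sizes are made up for the example and are not part of the patch.

#include <stdio.h>
#include <string.h>

#define BUF_SZ 16

static unsigned long guarded_copy(char *dst, const char *src, unsigned long n)
{
	if ((long)n < 0)	/* sign bit set: reject, report nothing copied */
		return n;
	if (n > BUF_SZ)		/* stand-in for access_ok()'s range check */
		return n;
	memcpy(dst, src, n);
	return 0;		/* 0 bytes left uncopied */
}

int main(void)
{
	char dst[BUF_SZ];
	char src[BUF_SZ] = "hello";
	long used = 20, avail = 4;
	unsigned long n = (unsigned long)(avail - used);	/* -16 wraps to a huge count */

	if (guarded_copy(dst, src, n) != 0)
		printf("rejected bogus length %lu\n", n);
	return 0;
}
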
1448diff -urNp linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c
1449--- linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1450+++ linux-2.6.32.45/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1451@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1452
1453 }
1454
1455-static struct platform_suspend_ops db1x_pm_ops = {
1456+static const struct platform_suspend_ops db1x_pm_ops = {
1457 .valid = suspend_valid_only_mem,
1458 .begin = db1x_pm_begin,
1459 .enter = db1x_pm_enter,
1460diff -urNp linux-2.6.32.45/arch/mips/include/asm/elf.h linux-2.6.32.45/arch/mips/include/asm/elf.h
1461--- linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1462+++ linux-2.6.32.45/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1463@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1464 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1465 #endif
1466
1467+#ifdef CONFIG_PAX_ASLR
1468+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1469+
1470+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1471+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1472+#endif
1473+
1474 #endif /* _ASM_ELF_H */
1475diff -urNp linux-2.6.32.45/arch/mips/include/asm/page.h linux-2.6.32.45/arch/mips/include/asm/page.h
1476--- linux-2.6.32.45/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1477+++ linux-2.6.32.45/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1478@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1479 #ifdef CONFIG_CPU_MIPS32
1480 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1481 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1482- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1483+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1484 #else
1485 typedef struct { unsigned long long pte; } pte_t;
1486 #define pte_val(x) ((x).pte)
1487diff -urNp linux-2.6.32.45/arch/mips/include/asm/reboot.h linux-2.6.32.45/arch/mips/include/asm/reboot.h
1488--- linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
1489+++ linux-2.6.32.45/arch/mips/include/asm/reboot.h 2011-08-21 17:35:02.000000000 -0400
1490@@ -9,7 +9,7 @@
1491 #ifndef _ASM_REBOOT_H
1492 #define _ASM_REBOOT_H
1493
1494-extern void (*_machine_restart)(char *command);
1495-extern void (*_machine_halt)(void);
1496+extern void (*__noreturn _machine_restart)(char *command);
1497+extern void (*__noreturn _machine_halt)(void);
1498
1499 #endif /* _ASM_REBOOT_H */
1500diff -urNp linux-2.6.32.45/arch/mips/include/asm/system.h linux-2.6.32.45/arch/mips/include/asm/system.h
1501--- linux-2.6.32.45/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.45/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1503@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1504 */
1505 #define __ARCH_WANT_UNLOCKED_CTXSW
1506
1507-extern unsigned long arch_align_stack(unsigned long sp);
1508+#define arch_align_stack(x) ((x) & ~0xfUL)
1509
1510 #endif /* _ASM_SYSTEM_H */
1511diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c
1512--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1513+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1514@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1515 #undef ELF_ET_DYN_BASE
1516 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1517
1518+#ifdef CONFIG_PAX_ASLR
1519+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1520+
1521+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1522+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#endif
1524+
1525 #include <asm/processor.h>
1526 #include <linux/module.h>
1527 #include <linux/elfcore.h>
1528diff -urNp linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c
1529--- linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1530+++ linux-2.6.32.45/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1531@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1532 #undef ELF_ET_DYN_BASE
1533 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1534
1535+#ifdef CONFIG_PAX_ASLR
1536+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1537+
1538+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1539+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1540+#endif
1541+
1542 #include <asm/processor.h>
1543
1544 /*
1545diff -urNp linux-2.6.32.45/arch/mips/kernel/kgdb.c linux-2.6.32.45/arch/mips/kernel/kgdb.c
1546--- linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1547+++ linux-2.6.32.45/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1548@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1549 return -1;
1550 }
1551
1552+/* cannot be const */
1553 struct kgdb_arch arch_kgdb_ops;
1554
1555 /*
1556diff -urNp linux-2.6.32.45/arch/mips/kernel/process.c linux-2.6.32.45/arch/mips/kernel/process.c
1557--- linux-2.6.32.45/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1558+++ linux-2.6.32.45/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1559@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1560 out:
1561 return pc;
1562 }
1563-
1564-/*
1565- * Don't forget that the stack pointer must be aligned on a 8 bytes
1566- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1567- */
1568-unsigned long arch_align_stack(unsigned long sp)
1569-{
1570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1571- sp -= get_random_int() & ~PAGE_MASK;
1572-
1573- return sp & ALMASK;
1574-}
1575diff -urNp linux-2.6.32.45/arch/mips/kernel/reset.c linux-2.6.32.45/arch/mips/kernel/reset.c
1576--- linux-2.6.32.45/arch/mips/kernel/reset.c 2011-03-27 14:31:47.000000000 -0400
1577+++ linux-2.6.32.45/arch/mips/kernel/reset.c 2011-08-21 17:35:26.000000000 -0400
1578@@ -19,8 +19,8 @@
1579 * So handle all using function pointers to machine specific
1580 * functions.
1581 */
1582-void (*_machine_restart)(char *command);
1583-void (*_machine_halt)(void);
1584+void (*__noreturn _machine_restart)(char *command);
1585+void (*__noreturn _machine_halt)(void);
1586 void (*pm_power_off)(void);
1587
1588 EXPORT_SYMBOL(pm_power_off);
1589@@ -29,16 +29,19 @@ void machine_restart(char *command)
1590 {
1591 if (_machine_restart)
1592 _machine_restart(command);
1593+ BUG();
1594 }
1595
1596 void machine_halt(void)
1597 {
1598 if (_machine_halt)
1599 _machine_halt();
1600+ BUG();
1601 }
1602
1603 void machine_power_off(void)
1604 {
1605 if (pm_power_off)
1606 pm_power_off();
1607+ BUG();
1608 }
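
Illustrative note on the reboot.h and reset.c hunks above: the machine-specific restart/halt hooks are declared __noreturn and each caller gains a trailing BUG(), so a NULL or misbehaving hook traps instead of quietly returning from a function the rest of the kernel treats as terminal. A tiny user-space sketch of the pattern follows; machine_halt_hook and trap() are stand-ins, with trap() playing the role of BUG().

#include <stdio.h>
#include <stdlib.h>

static void (*machine_halt_hook)(void);	/* platform hook, may legitimately be NULL */

static void trap(void)			/* stand-in for BUG() */
{
	fprintf(stderr, "halt path fell through\n");
	abort();
}

static void machine_halt(void)
{
	if (machine_halt_hook)
		machine_halt_hook();
	trap();		/* never fall back into a caller that assumes we are gone */
}

int main(void)
{
	machine_halt();	/* with no hook registered, this traps instead of returning */
	return 0;	/* unreachable */
}
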
1609diff -urNp linux-2.6.32.45/arch/mips/kernel/syscall.c linux-2.6.32.45/arch/mips/kernel/syscall.c
1610--- linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1611+++ linux-2.6.32.45/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1612@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1613 do_color_align = 0;
1614 if (filp || (flags & MAP_SHARED))
1615 do_color_align = 1;
1616+
1617+#ifdef CONFIG_PAX_RANDMMAP
1618+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1619+#endif
1620+
1621 if (addr) {
1622 if (do_color_align)
1623 addr = COLOUR_ALIGN(addr, pgoff);
1624 else
1625 addr = PAGE_ALIGN(addr);
1626 vmm = find_vma(current->mm, addr);
1627- if (task_size - len >= addr &&
1628- (!vmm || addr + len <= vmm->vm_start))
1629+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1630 return addr;
1631 }
1632- addr = TASK_UNMAPPED_BASE;
1633+ addr = current->mm->mmap_base;
1634 if (do_color_align)
1635 addr = COLOUR_ALIGN(addr, pgoff);
1636 else
1637@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1638 /* At this point: (!vmm || addr < vmm->vm_end). */
1639 if (task_size - len < addr)
1640 return -ENOMEM;
1641- if (!vmm || addr + len <= vmm->vm_start)
1642+ if (check_heap_stack_gap(vmm, addr, len))
1643 return addr;
1644 addr = vmm->vm_end;
1645 if (do_color_align)
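
Illustrative note on the arch_get_unmapped_area() changes above (the same substitution appears in the other architectures' hunks in this patch): the plain overlap test "!vmm || addr + len <= vmm->vm_start" becomes check_heap_stack_gap(), and the search now starts at current->mm->mmap_base so a PAX_RANDMMAP-randomized base is honored instead of the fixed TASK_UNMAPPED_BASE. check_heap_stack_gap() itself is defined elsewhere in the patch; the following is only a simplified sketch of the idea it implements -- keep a configurable gap between a new mapping and a stack VMA sitting just above it. gap_ok, vma_stub and GAP_PAGES are stand-in names for the example.

#include <stdbool.h>

#define PAGE_SIZE	4096UL
#define GAP_PAGES	64UL		/* stand-in for the configurable heap/stack gap */
#define VM_GROWSDOWN	0x0100UL

struct vma_stub {			/* stand-in for struct vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_flags;
};

static bool gap_ok(const struct vma_stub *vma, unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)				/* nothing above: always fits */
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)	/* stack above? keep it at arm's length */
		gap = GAP_PAGES * PAGE_SIZE;
	return addr + len + gap <= vma->vm_start;
}

int main(void)
{
	struct vma_stub stack = { 0x7f0000000000UL, VM_GROWSDOWN };

	/* a mapping ending flush against the stack start is now rejected */
	return gap_ok(&stack, stack.vm_start - PAGE_SIZE, PAGE_SIZE) ? 1 : 0;
}
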
1646diff -urNp linux-2.6.32.45/arch/mips/Makefile linux-2.6.32.45/arch/mips/Makefile
1647--- linux-2.6.32.45/arch/mips/Makefile 2011-03-27 14:31:47.000000000 -0400
1648+++ linux-2.6.32.45/arch/mips/Makefile 2011-08-21 19:26:52.000000000 -0400
1649@@ -51,6 +51,8 @@ endif
1650 cflags-y := -ffunction-sections
1651 cflags-y += $(call cc-option, -mno-check-zero-division)
1652
1653+cflags-y += -Wno-sign-compare -Wno-extra
1654+
1655 ifdef CONFIG_32BIT
1656 ld-emul = $(32bit-emul)
1657 vmlinux-32 = vmlinux
1658diff -urNp linux-2.6.32.45/arch/mips/mm/fault.c linux-2.6.32.45/arch/mips/mm/fault.c
1659--- linux-2.6.32.45/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1660+++ linux-2.6.32.45/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1661@@ -26,6 +26,23 @@
1662 #include <asm/ptrace.h>
1663 #include <asm/highmem.h> /* For VMALLOC_END */
1664
1665+#ifdef CONFIG_PAX_PAGEEXEC
1666+void pax_report_insns(void *pc, void *sp)
1667+{
1668+ unsigned long i;
1669+
1670+ printk(KERN_ERR "PAX: bytes at PC: ");
1671+ for (i = 0; i < 5; i++) {
1672+ unsigned int c;
1673+ if (get_user(c, (unsigned int *)pc+i))
1674+ printk(KERN_CONT "???????? ");
1675+ else
1676+ printk(KERN_CONT "%08x ", c);
1677+ }
1678+ printk("\n");
1679+}
1680+#endif
1681+
1682 /*
1683 * This routine handles page faults. It determines the address,
1684 * and the problem, and then passes it off to one of the appropriate
1685diff -urNp linux-2.6.32.45/arch/parisc/include/asm/elf.h linux-2.6.32.45/arch/parisc/include/asm/elf.h
1686--- linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1687+++ linux-2.6.32.45/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1688@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1689
1690 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1691
1692+#ifdef CONFIG_PAX_ASLR
1693+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1694+
1695+#define PAX_DELTA_MMAP_LEN 16
1696+#define PAX_DELTA_STACK_LEN 16
1697+#endif
1698+
1699 /* This yields a mask that user programs can use to figure out what
1700 instruction set this CPU supports. This could be done in user space,
1701 but it's not easy, and we've already done it here. */
1702diff -urNp linux-2.6.32.45/arch/parisc/include/asm/pgtable.h linux-2.6.32.45/arch/parisc/include/asm/pgtable.h
1703--- linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1704+++ linux-2.6.32.45/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1705@@ -207,6 +207,17 @@
1706 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1707 #define PAGE_COPY PAGE_EXECREAD
1708 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1709+
1710+#ifdef CONFIG_PAX_PAGEEXEC
1711+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1712+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1713+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1714+#else
1715+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1716+# define PAGE_COPY_NOEXEC PAGE_COPY
1717+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1718+#endif
1719+
1720 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1721 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1722 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1723diff -urNp linux-2.6.32.45/arch/parisc/kernel/module.c linux-2.6.32.45/arch/parisc/kernel/module.c
1724--- linux-2.6.32.45/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1725+++ linux-2.6.32.45/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1726@@ -95,16 +95,38 @@
1727
1728 /* three functions to determine where in the module core
1729 * or init pieces the location is */
1730+static inline int in_init_rx(struct module *me, void *loc)
1731+{
1732+ return (loc >= me->module_init_rx &&
1733+ loc < (me->module_init_rx + me->init_size_rx));
1734+}
1735+
1736+static inline int in_init_rw(struct module *me, void *loc)
1737+{
1738+ return (loc >= me->module_init_rw &&
1739+ loc < (me->module_init_rw + me->init_size_rw));
1740+}
1741+
1742 static inline int in_init(struct module *me, void *loc)
1743 {
1744- return (loc >= me->module_init &&
1745- loc <= (me->module_init + me->init_size));
1746+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1747+}
1748+
1749+static inline int in_core_rx(struct module *me, void *loc)
1750+{
1751+ return (loc >= me->module_core_rx &&
1752+ loc < (me->module_core_rx + me->core_size_rx));
1753+}
1754+
1755+static inline int in_core_rw(struct module *me, void *loc)
1756+{
1757+ return (loc >= me->module_core_rw &&
1758+ loc < (me->module_core_rw + me->core_size_rw));
1759 }
1760
1761 static inline int in_core(struct module *me, void *loc)
1762 {
1763- return (loc >= me->module_core &&
1764- loc <= (me->module_core + me->core_size));
1765+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1766 }
1767
1768 static inline int in_local(struct module *me, void *loc)
1769@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1770 }
1771
1772 /* align things a bit */
1773- me->core_size = ALIGN(me->core_size, 16);
1774- me->arch.got_offset = me->core_size;
1775- me->core_size += gots * sizeof(struct got_entry);
1776-
1777- me->core_size = ALIGN(me->core_size, 16);
1778- me->arch.fdesc_offset = me->core_size;
1779- me->core_size += fdescs * sizeof(Elf_Fdesc);
1780+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1781+ me->arch.got_offset = me->core_size_rw;
1782+ me->core_size_rw += gots * sizeof(struct got_entry);
1783+
1784+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1785+ me->arch.fdesc_offset = me->core_size_rw;
1786+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1787
1788 me->arch.got_max = gots;
1789 me->arch.fdesc_max = fdescs;
1790@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1791
1792 BUG_ON(value == 0);
1793
1794- got = me->module_core + me->arch.got_offset;
1795+ got = me->module_core_rw + me->arch.got_offset;
1796 for (i = 0; got[i].addr; i++)
1797 if (got[i].addr == value)
1798 goto out;
1799@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1800 #ifdef CONFIG_64BIT
1801 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1802 {
1803- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1804+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1805
1806 if (!value) {
1807 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1808@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1809
1810 /* Create new one */
1811 fdesc->addr = value;
1812- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1813+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1814 return (Elf_Addr)fdesc;
1815 }
1816 #endif /* CONFIG_64BIT */
1817@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1818
1819 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1820 end = table + sechdrs[me->arch.unwind_section].sh_size;
1821- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1822+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1823
1824 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1825 me->arch.unwind_section, table, end, gp);
1826diff -urNp linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c
1827--- linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1828+++ linux-2.6.32.45/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1829@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1830 /* At this point: (!vma || addr < vma->vm_end). */
1831 if (TASK_SIZE - len < addr)
1832 return -ENOMEM;
1833- if (!vma || addr + len <= vma->vm_start)
1834+ if (check_heap_stack_gap(vma, addr, len))
1835 return addr;
1836 addr = vma->vm_end;
1837 }
1838@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1839 /* At this point: (!vma || addr < vma->vm_end). */
1840 if (TASK_SIZE - len < addr)
1841 return -ENOMEM;
1842- if (!vma || addr + len <= vma->vm_start)
1843+ if (check_heap_stack_gap(vma, addr, len))
1844 return addr;
1845 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1846 if (addr < vma->vm_end) /* handle wraparound */
1847@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1848 if (flags & MAP_FIXED)
1849 return addr;
1850 if (!addr)
1851- addr = TASK_UNMAPPED_BASE;
1852+ addr = current->mm->mmap_base;
1853
1854 if (filp) {
1855 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1856diff -urNp linux-2.6.32.45/arch/parisc/kernel/traps.c linux-2.6.32.45/arch/parisc/kernel/traps.c
1857--- linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1858+++ linux-2.6.32.45/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1859@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1860
1861 down_read(&current->mm->mmap_sem);
1862 vma = find_vma(current->mm,regs->iaoq[0]);
1863- if (vma && (regs->iaoq[0] >= vma->vm_start)
1864- && (vma->vm_flags & VM_EXEC)) {
1865-
1866+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1867 fault_address = regs->iaoq[0];
1868 fault_space = regs->iasq[0];
1869
1870diff -urNp linux-2.6.32.45/arch/parisc/mm/fault.c linux-2.6.32.45/arch/parisc/mm/fault.c
1871--- linux-2.6.32.45/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1872+++ linux-2.6.32.45/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1873@@ -15,6 +15,7 @@
1874 #include <linux/sched.h>
1875 #include <linux/interrupt.h>
1876 #include <linux/module.h>
1877+#include <linux/unistd.h>
1878
1879 #include <asm/uaccess.h>
1880 #include <asm/traps.h>
1881@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1882 static unsigned long
1883 parisc_acctyp(unsigned long code, unsigned int inst)
1884 {
1885- if (code == 6 || code == 16)
1886+ if (code == 6 || code == 7 || code == 16)
1887 return VM_EXEC;
1888
1889 switch (inst & 0xf0000000) {
1890@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1891 }
1892 #endif
1893
1894+#ifdef CONFIG_PAX_PAGEEXEC
1895+/*
1896+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1897+ *
1898+ * returns 1 when task should be killed
1899+ * 2 when rt_sigreturn trampoline was detected
1900+ * 3 when unpatched PLT trampoline was detected
1901+ */
1902+static int pax_handle_fetch_fault(struct pt_regs *regs)
1903+{
1904+
1905+#ifdef CONFIG_PAX_EMUPLT
1906+ int err;
1907+
1908+ do { /* PaX: unpatched PLT emulation */
1909+ unsigned int bl, depwi;
1910+
1911+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1912+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1913+
1914+ if (err)
1915+ break;
1916+
1917+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1918+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1919+
1920+ err = get_user(ldw, (unsigned int *)addr);
1921+ err |= get_user(bv, (unsigned int *)(addr+4));
1922+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1923+
1924+ if (err)
1925+ break;
1926+
1927+ if (ldw == 0x0E801096U &&
1928+ bv == 0xEAC0C000U &&
1929+ ldw2 == 0x0E881095U)
1930+ {
1931+ unsigned int resolver, map;
1932+
1933+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1934+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1935+ if (err)
1936+ break;
1937+
1938+ regs->gr[20] = instruction_pointer(regs)+8;
1939+ regs->gr[21] = map;
1940+ regs->gr[22] = resolver;
1941+ regs->iaoq[0] = resolver | 3UL;
1942+ regs->iaoq[1] = regs->iaoq[0] + 4;
1943+ return 3;
1944+ }
1945+ }
1946+ } while (0);
1947+#endif
1948+
1949+#ifdef CONFIG_PAX_EMUTRAMP
1950+
1951+#ifndef CONFIG_PAX_EMUSIGRT
1952+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1953+ return 1;
1954+#endif
1955+
1956+ do { /* PaX: rt_sigreturn emulation */
1957+ unsigned int ldi1, ldi2, bel, nop;
1958+
1959+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1960+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1961+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1962+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1963+
1964+ if (err)
1965+ break;
1966+
1967+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1968+ ldi2 == 0x3414015AU &&
1969+ bel == 0xE4008200U &&
1970+ nop == 0x08000240U)
1971+ {
1972+ regs->gr[25] = (ldi1 & 2) >> 1;
1973+ regs->gr[20] = __NR_rt_sigreturn;
1974+ regs->gr[31] = regs->iaoq[1] + 16;
1975+ regs->sr[0] = regs->iasq[1];
1976+ regs->iaoq[0] = 0x100UL;
1977+ regs->iaoq[1] = regs->iaoq[0] + 4;
1978+ regs->iasq[0] = regs->sr[2];
1979+ regs->iasq[1] = regs->sr[2];
1980+ return 2;
1981+ }
1982+ } while (0);
1983+#endif
1984+
1985+ return 1;
1986+}
1987+
1988+void pax_report_insns(void *pc, void *sp)
1989+{
1990+ unsigned long i;
1991+
1992+ printk(KERN_ERR "PAX: bytes at PC: ");
1993+ for (i = 0; i < 5; i++) {
1994+ unsigned int c;
1995+ if (get_user(c, (unsigned int *)pc+i))
1996+ printk(KERN_CONT "???????? ");
1997+ else
1998+ printk(KERN_CONT "%08x ", c);
1999+ }
2000+ printk("\n");
2001+}
2002+#endif
2003+
2004 int fixup_exception(struct pt_regs *regs)
2005 {
2006 const struct exception_table_entry *fix;
2007@@ -192,8 +303,33 @@ good_area:
2008
2009 acc_type = parisc_acctyp(code,regs->iir);
2010
2011- if ((vma->vm_flags & acc_type) != acc_type)
2012+ if ((vma->vm_flags & acc_type) != acc_type) {
2013+
2014+#ifdef CONFIG_PAX_PAGEEXEC
2015+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2016+ (address & ~3UL) == instruction_pointer(regs))
2017+ {
2018+ up_read(&mm->mmap_sem);
2019+ switch (pax_handle_fetch_fault(regs)) {
2020+
2021+#ifdef CONFIG_PAX_EMUPLT
2022+ case 3:
2023+ return;
2024+#endif
2025+
2026+#ifdef CONFIG_PAX_EMUTRAMP
2027+ case 2:
2028+ return;
2029+#endif
2030+
2031+ }
2032+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2033+ do_group_exit(SIGKILL);
2034+ }
2035+#endif
2036+
2037 goto bad_area;
2038+ }
2039
2040 /*
2041 * If for any reason at all we couldn't handle the fault, make
2042diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/device.h linux-2.6.32.45/arch/powerpc/include/asm/device.h
2043--- linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2044+++ linux-2.6.32.45/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2045@@ -14,7 +14,7 @@ struct dev_archdata {
2046 struct device_node *of_node;
2047
2048 /* DMA operations on that device */
2049- struct dma_map_ops *dma_ops;
2050+ const struct dma_map_ops *dma_ops;
2051
2052 /*
2053 * When an iommu is in use, dma_data is used as a ptr to the base of the
2054diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h
2055--- linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2056+++ linux-2.6.32.45/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2057@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2058 #ifdef CONFIG_PPC64
2059 extern struct dma_map_ops dma_iommu_ops;
2060 #endif
2061-extern struct dma_map_ops dma_direct_ops;
2062+extern const struct dma_map_ops dma_direct_ops;
2063
2064-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2065+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2066 {
2067 /* We don't handle the NULL dev case for ISA for now. We could
2068 * do it via an out of line call but it is not needed for now. The
2069@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2070 return dev->archdata.dma_ops;
2071 }
2072
2073-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2074+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2075 {
2076 dev->archdata.dma_ops = ops;
2077 }
2078@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2079
2080 static inline int dma_supported(struct device *dev, u64 mask)
2081 {
2082- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2083+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2084
2085 if (unlikely(dma_ops == NULL))
2086 return 0;
2087@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2088
2089 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2090 {
2091- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2092+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2093
2094 if (unlikely(dma_ops == NULL))
2095 return -EIO;
2096@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2097 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2098 dma_addr_t *dma_handle, gfp_t flag)
2099 {
2100- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2101+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2102 void *cpu_addr;
2103
2104 BUG_ON(!dma_ops);
2105@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2106 static inline void dma_free_coherent(struct device *dev, size_t size,
2107 void *cpu_addr, dma_addr_t dma_handle)
2108 {
2109- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2110+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2111
2112 BUG_ON(!dma_ops);
2113
2114@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2115
2116 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2117 {
2118- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2119+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2120
2121 if (dma_ops->mapping_error)
2122 return dma_ops->mapping_error(dev, dma_addr);
2123diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/elf.h linux-2.6.32.45/arch/powerpc/include/asm/elf.h
2124--- linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2125+++ linux-2.6.32.45/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2126@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2127 the loader. We need to make sure that it is out of the way of the program
2128 that it will "exec", and that there is sufficient room for the brk. */
2129
2130-extern unsigned long randomize_et_dyn(unsigned long base);
2131-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2132+#define ELF_ET_DYN_BASE (0x20000000)
2133+
2134+#ifdef CONFIG_PAX_ASLR
2135+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2136+
2137+#ifdef __powerpc64__
2138+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2139+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2140+#else
2141+#define PAX_DELTA_MMAP_LEN 15
2142+#define PAX_DELTA_STACK_LEN 15
2143+#endif
2144+#endif
2145
2146 /*
2147 * Our registers are always unsigned longs, whether we're a 32 bit
2148@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2149 (0x7ff >> (PAGE_SHIFT - 12)) : \
2150 (0x3ffff >> (PAGE_SHIFT - 12)))
2151
2152-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2153-#define arch_randomize_brk arch_randomize_brk
2154-
2155 #endif /* __KERNEL__ */
2156
2157 /*
2158diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/iommu.h linux-2.6.32.45/arch/powerpc/include/asm/iommu.h
2159--- linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2160+++ linux-2.6.32.45/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2161@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2162 extern void iommu_init_early_dart(void);
2163 extern void iommu_init_early_pasemi(void);
2164
2165+/* dma-iommu.c */
2166+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2167+
2168 #ifdef CONFIG_PCI
2169 extern void pci_iommu_init(void);
2170 extern void pci_direct_iommu_init(void);
2171diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h
2172--- linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2173+++ linux-2.6.32.45/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2174@@ -26,6 +26,7 @@ enum km_type {
2175 KM_SOFTIRQ1,
2176 KM_PPC_SYNC_PAGE,
2177 KM_PPC_SYNC_ICACHE,
2178+ KM_CLEARPAGE,
2179 KM_TYPE_NR
2180 };
2181
2182diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page_64.h linux-2.6.32.45/arch/powerpc/include/asm/page_64.h
2183--- linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2184+++ linux-2.6.32.45/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2185@@ -180,15 +180,18 @@ do { \
2186 * stack by default, so in the absense of a PT_GNU_STACK program header
2187 * we turn execute permission off.
2188 */
2189-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2190- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2191+#define VM_STACK_DEFAULT_FLAGS32 \
2192+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2193+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2194
2195 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2196 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2197
2198+#ifndef CONFIG_PAX_PAGEEXEC
2199 #define VM_STACK_DEFAULT_FLAGS \
2200 (test_thread_flag(TIF_32BIT) ? \
2201 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2202+#endif
2203
2204 #include <asm-generic/getorder.h>
2205
2206diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/page.h linux-2.6.32.45/arch/powerpc/include/asm/page.h
2207--- linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2208+++ linux-2.6.32.45/arch/powerpc/include/asm/page.h 2011-08-21 16:07:39.000000000 -0400
2209@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2210 * and needs to be executable. This means the whole heap ends
2211 * up being executable.
2212 */
2213-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2214- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2215+#define VM_DATA_DEFAULT_FLAGS32 \
2216+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2217+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2218
2219 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2220 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2221@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2222 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2223 #endif
2224
2225+#define ktla_ktva(addr) (addr)
2226+#define ktva_ktla(addr) (addr)
2227+
2228 #ifndef __ASSEMBLY__
2229
2230 #undef STRICT_MM_TYPECHECKS
2231diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pci.h linux-2.6.32.45/arch/powerpc/include/asm/pci.h
2232--- linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2233+++ linux-2.6.32.45/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2234@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2235 }
2236
2237 #ifdef CONFIG_PCI
2238-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2239-extern struct dma_map_ops *get_pci_dma_ops(void);
2240+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2241+extern const struct dma_map_ops *get_pci_dma_ops(void);
2242 #else /* CONFIG_PCI */
2243 #define set_pci_dma_ops(d)
2244 #define get_pci_dma_ops() NULL
2245diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h
2246--- linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2247+++ linux-2.6.32.45/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2248@@ -2,6 +2,7 @@
2249 #define _ASM_POWERPC_PGTABLE_H
2250 #ifdef __KERNEL__
2251
2252+#include <linux/const.h>
2253 #ifndef __ASSEMBLY__
2254 #include <asm/processor.h> /* For TASK_SIZE */
2255 #include <asm/mmu.h>
2256diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h
2257--- linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2258+++ linux-2.6.32.45/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2259@@ -21,6 +21,7 @@
2260 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2261 #define _PAGE_USER 0x004 /* usermode access allowed */
2262 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2263+#define _PAGE_EXEC _PAGE_GUARDED
2264 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2265 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2266 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2267diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h
2268--- linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
2269+++ linux-2.6.32.45/arch/powerpc/include/asm/ptrace.h 2011-08-21 15:53:58.000000000 -0400
2270@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct p
2271 } while(0)
2272
2273 struct task_struct;
2274-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2275+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2276 extern int ptrace_put_reg(struct task_struct *task, int regno,
2277 unsigned long data);
2278
2279diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/reg.h linux-2.6.32.45/arch/powerpc/include/asm/reg.h
2280--- linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2281+++ linux-2.6.32.45/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2282@@ -191,6 +191,7 @@
2283 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2284 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2285 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2286+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2287 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2288 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2289 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2290diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h
2291--- linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2292+++ linux-2.6.32.45/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2293@@ -13,7 +13,7 @@
2294
2295 #include <linux/swiotlb.h>
2296
2297-extern struct dma_map_ops swiotlb_dma_ops;
2298+extern const struct dma_map_ops swiotlb_dma_ops;
2299
2300 static inline void dma_mark_clean(void *addr, size_t size) {}
2301
2302diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/system.h linux-2.6.32.45/arch/powerpc/include/asm/system.h
2303--- linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2304+++ linux-2.6.32.45/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2305@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2306 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2307 #endif
2308
2309-extern unsigned long arch_align_stack(unsigned long sp);
2310+#define arch_align_stack(x) ((x) & ~0xfUL)
2311
2312 /* Used in very early kernel initialization. */
2313 extern unsigned long reloc_offset(void);
2314diff -urNp linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h
2315--- linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2316+++ linux-2.6.32.45/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2317@@ -13,6 +13,8 @@
2318 #define VERIFY_READ 0
2319 #define VERIFY_WRITE 1
2320
2321+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2322+
2323 /*
2324 * The fs value determines whether argument validity checking should be
2325 * performed or not. If get_fs() == USER_DS, checking is performed, with
2326@@ -327,52 +329,6 @@ do { \
2327 extern unsigned long __copy_tofrom_user(void __user *to,
2328 const void __user *from, unsigned long size);
2329
2330-#ifndef __powerpc64__
2331-
2332-static inline unsigned long copy_from_user(void *to,
2333- const void __user *from, unsigned long n)
2334-{
2335- unsigned long over;
2336-
2337- if (access_ok(VERIFY_READ, from, n))
2338- return __copy_tofrom_user((__force void __user *)to, from, n);
2339- if ((unsigned long)from < TASK_SIZE) {
2340- over = (unsigned long)from + n - TASK_SIZE;
2341- return __copy_tofrom_user((__force void __user *)to, from,
2342- n - over) + over;
2343- }
2344- return n;
2345-}
2346-
2347-static inline unsigned long copy_to_user(void __user *to,
2348- const void *from, unsigned long n)
2349-{
2350- unsigned long over;
2351-
2352- if (access_ok(VERIFY_WRITE, to, n))
2353- return __copy_tofrom_user(to, (__force void __user *)from, n);
2354- if ((unsigned long)to < TASK_SIZE) {
2355- over = (unsigned long)to + n - TASK_SIZE;
2356- return __copy_tofrom_user(to, (__force void __user *)from,
2357- n - over) + over;
2358- }
2359- return n;
2360-}
2361-
2362-#else /* __powerpc64__ */
2363-
2364-#define __copy_in_user(to, from, size) \
2365- __copy_tofrom_user((to), (from), (size))
2366-
2367-extern unsigned long copy_from_user(void *to, const void __user *from,
2368- unsigned long n);
2369-extern unsigned long copy_to_user(void __user *to, const void *from,
2370- unsigned long n);
2371-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2372- unsigned long n);
2373-
2374-#endif /* __powerpc64__ */
2375-
2376 static inline unsigned long __copy_from_user_inatomic(void *to,
2377 const void __user *from, unsigned long n)
2378 {
2379@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2380 if (ret == 0)
2381 return 0;
2382 }
2383+
2384+ if (!__builtin_constant_p(n))
2385+ check_object_size(to, n, false);
2386+
2387 return __copy_tofrom_user((__force void __user *)to, from, n);
2388 }
2389
2390@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2391 if (ret == 0)
2392 return 0;
2393 }
2394+
2395+ if (!__builtin_constant_p(n))
2396+ check_object_size(from, n, true);
2397+
2398 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2399 }
2400
2401@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2402 return __copy_to_user_inatomic(to, from, size);
2403 }
2404
2405+#ifndef __powerpc64__
2406+
2407+static inline unsigned long __must_check copy_from_user(void *to,
2408+ const void __user *from, unsigned long n)
2409+{
2410+ unsigned long over;
2411+
2412+ if ((long)n < 0)
2413+ return n;
2414+
2415+ if (access_ok(VERIFY_READ, from, n)) {
2416+ if (!__builtin_constant_p(n))
2417+ check_object_size(to, n, false);
2418+ return __copy_tofrom_user((__force void __user *)to, from, n);
2419+ }
2420+ if ((unsigned long)from < TASK_SIZE) {
2421+ over = (unsigned long)from + n - TASK_SIZE;
2422+ if (!__builtin_constant_p(n - over))
2423+ check_object_size(to, n - over, false);
2424+ return __copy_tofrom_user((__force void __user *)to, from,
2425+ n - over) + over;
2426+ }
2427+ return n;
2428+}
2429+
2430+static inline unsigned long __must_check copy_to_user(void __user *to,
2431+ const void *from, unsigned long n)
2432+{
2433+ unsigned long over;
2434+
2435+ if ((long)n < 0)
2436+ return n;
2437+
2438+ if (access_ok(VERIFY_WRITE, to, n)) {
2439+ if (!__builtin_constant_p(n))
2440+ check_object_size(from, n, true);
2441+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2442+ }
2443+ if ((unsigned long)to < TASK_SIZE) {
2444+ over = (unsigned long)to + n - TASK_SIZE;
2445+ if (!__builtin_constant_p(n))
2446+ check_object_size(from, n - over, true);
2447+ return __copy_tofrom_user(to, (__force void __user *)from,
2448+ n - over) + over;
2449+ }
2450+ return n;
2451+}
2452+
2453+#else /* __powerpc64__ */
2454+
2455+#define __copy_in_user(to, from, size) \
2456+ __copy_tofrom_user((to), (from), (size))
2457+
2458+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2459+{
2460+ if ((long)n < 0 || n > INT_MAX)
2461+ return n;
2462+
2463+ if (!__builtin_constant_p(n))
2464+ check_object_size(to, n, false);
2465+
2466+ if (likely(access_ok(VERIFY_READ, from, n)))
2467+ n = __copy_from_user(to, from, n);
2468+ else
2469+ memset(to, 0, n);
2470+ return n;
2471+}
2472+
2473+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2474+{
2475+ if ((long)n < 0 || n > INT_MAX)
2476+ return n;
2477+
2478+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2479+ if (!__builtin_constant_p(n))
2480+ check_object_size(from, n, true);
2481+ n = __copy_to_user(to, from, n);
2482+ }
2483+ return n;
2484+}
2485+
2486+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2487+ unsigned long n);
2488+
2489+#endif /* __powerpc64__ */
2490+
2491 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2492
2493 static inline unsigned long clear_user(void __user *addr, unsigned long size)
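
Illustrative note on the powerpc uaccess.h rework above: copy_from_user()/copy_to_user() become __must_check, gain the same negative-length guard used elsewhere in the patch, call check_object_size() so the kernel-side buffer can be validated by the usercopy checking added elsewhere in this patch, and on 64-bit the failure path zeroes the destination so a failed copy cannot leave stale kernel stack contents behind. A small stand-alone sketch of that last point follows; struct req, fake_copy_from_user and ubuf are made-up names, not the kernel's API.

#include <string.h>

struct req { unsigned long flags; char name[32]; };

static unsigned long fake_copy_from_user(void *to, const void *from,
					 unsigned long n, int access_ok_result)
{
	if (!access_ok_result) {
		memset(to, 0, n);	/* fail closed: caller sees zeroes, not stale stack */
		return n;		/* all n bytes reported as uncopied */
	}
	memcpy(to, from, n);		/* stand-in for the real __copy_from_user() */
	return 0;
}

int main(void)
{
	struct req r;
	unsigned char ubuf[sizeof(struct req)] = { 0 };

	/* simulate access_ok() failing: r is zeroed instead of holding stale data */
	return fake_copy_from_user(&r, ubuf, sizeof(r), 0) == sizeof(r) ? 0 : 1;
}
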
2494diff -urNp linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c
2495--- linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2496+++ linux-2.6.32.45/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2497@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2498 &cache_assoc_attr,
2499 };
2500
2501-static struct sysfs_ops cache_index_ops = {
2502+static const struct sysfs_ops cache_index_ops = {
2503 .show = cache_index_show,
2504 };
2505
2506diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma.c linux-2.6.32.45/arch/powerpc/kernel/dma.c
2507--- linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2508+++ linux-2.6.32.45/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2509@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2510 }
2511 #endif
2512
2513-struct dma_map_ops dma_direct_ops = {
2514+const struct dma_map_ops dma_direct_ops = {
2515 .alloc_coherent = dma_direct_alloc_coherent,
2516 .free_coherent = dma_direct_free_coherent,
2517 .map_sg = dma_direct_map_sg,
2518diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c
2519--- linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2520+++ linux-2.6.32.45/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2521@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2522 }
2523
2524 /* We support DMA to/from any memory page via the iommu */
2525-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2526+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2527 {
2528 struct iommu_table *tbl = get_iommu_table_base(dev);
2529
2530diff -urNp linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c
2531--- linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2532+++ linux-2.6.32.45/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2533@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2534 * map_page, and unmap_page on highmem, use normal dma_ops
2535 * for everything else.
2536 */
2537-struct dma_map_ops swiotlb_dma_ops = {
2538+const struct dma_map_ops swiotlb_dma_ops = {
2539 .alloc_coherent = dma_direct_alloc_coherent,
2540 .free_coherent = dma_direct_free_coherent,
2541 .map_sg = swiotlb_map_sg_attrs,
2542diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S
2543--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2544+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2545@@ -455,6 +455,7 @@ storage_fault_common:
2546 std r14,_DAR(r1)
2547 std r15,_DSISR(r1)
2548 addi r3,r1,STACK_FRAME_OVERHEAD
2549+ bl .save_nvgprs
2550 mr r4,r14
2551 mr r5,r15
2552 ld r14,PACA_EXGEN+EX_R14(r13)
2553@@ -464,8 +465,7 @@ storage_fault_common:
2554 cmpdi r3,0
2555 bne- 1f
2556 b .ret_from_except_lite
2557-1: bl .save_nvgprs
2558- mr r5,r3
2559+1: mr r5,r3
2560 addi r3,r1,STACK_FRAME_OVERHEAD
2561 ld r4,_DAR(r1)
2562 bl .bad_page_fault
2563diff -urNp linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S
2564--- linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2565+++ linux-2.6.32.45/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2566@@ -818,10 +818,10 @@ handle_page_fault:
2567 11: ld r4,_DAR(r1)
2568 ld r5,_DSISR(r1)
2569 addi r3,r1,STACK_FRAME_OVERHEAD
2570+ bl .save_nvgprs
2571 bl .do_page_fault
2572 cmpdi r3,0
2573 beq+ 13f
2574- bl .save_nvgprs
2575 mr r5,r3
2576 addi r3,r1,STACK_FRAME_OVERHEAD
2577 lwz r4,_DAR(r1)
2578diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c
2579--- linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2580+++ linux-2.6.32.45/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2581@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2582 return 1;
2583 }
2584
2585-static struct dma_map_ops ibmebus_dma_ops = {
2586+static const struct dma_map_ops ibmebus_dma_ops = {
2587 .alloc_coherent = ibmebus_alloc_coherent,
2588 .free_coherent = ibmebus_free_coherent,
2589 .map_sg = ibmebus_map_sg,
2590diff -urNp linux-2.6.32.45/arch/powerpc/kernel/kgdb.c linux-2.6.32.45/arch/powerpc/kernel/kgdb.c
2591--- linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2592+++ linux-2.6.32.45/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2593@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2594 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2595 return 0;
2596
2597- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2598+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2599 regs->nip += 4;
2600
2601 return 1;
2602@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2603 /*
2604 * Global data
2605 */
2606-struct kgdb_arch arch_kgdb_ops = {
2607+const struct kgdb_arch arch_kgdb_ops = {
2608 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2609 };
2610
2611diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module_32.c linux-2.6.32.45/arch/powerpc/kernel/module_32.c
2612--- linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2613+++ linux-2.6.32.45/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2614@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2615 me->arch.core_plt_section = i;
2616 }
2617 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2618- printk("Module doesn't contain .plt or .init.plt sections.\n");
2619+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2620 return -ENOEXEC;
2621 }
2622
2623@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2624
2625 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2626 /* Init, or core PLT? */
2627- if (location >= mod->module_core
2628- && location < mod->module_core + mod->core_size)
2629+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2630+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2631 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2632- else
2633+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2634+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2635 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2636+ else {
2637+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2638+ return ~0UL;
2639+ }
2640
2641 /* Find this entry, or if that fails, the next avail. entry */
2642 while (entry->jump[0]) {
2643diff -urNp linux-2.6.32.45/arch/powerpc/kernel/module.c linux-2.6.32.45/arch/powerpc/kernel/module.c
2644--- linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2645+++ linux-2.6.32.45/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2646@@ -31,11 +31,24 @@
2647
2648 LIST_HEAD(module_bug_list);
2649
2650+#ifdef CONFIG_PAX_KERNEXEC
2651 void *module_alloc(unsigned long size)
2652 {
2653 if (size == 0)
2654 return NULL;
2655
2656+ return vmalloc(size);
2657+}
2658+
2659+void *module_alloc_exec(unsigned long size)
2660+#else
2661+void *module_alloc(unsigned long size)
2662+#endif
2663+
2664+{
2665+ if (size == 0)
2666+ return NULL;
2667+
2668 return vmalloc_exec(size);
2669 }
2670
2671@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2672 vfree(module_region);
2673 }
2674
2675+#ifdef CONFIG_PAX_KERNEXEC
2676+void module_free_exec(struct module *mod, void *module_region)
2677+{
2678+ module_free(mod, module_region);
2679+}
2680+#endif
2681+
2682 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2683 const Elf_Shdr *sechdrs,
2684 const char *name)
2685diff -urNp linux-2.6.32.45/arch/powerpc/kernel/pci-common.c linux-2.6.32.45/arch/powerpc/kernel/pci-common.c
2686--- linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2687+++ linux-2.6.32.45/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2688@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2689 unsigned int ppc_pci_flags = 0;
2690
2691
2692-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2693+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2694
2695-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2696+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2697 {
2698 pci_dma_ops = dma_ops;
2699 }
2700
2701-struct dma_map_ops *get_pci_dma_ops(void)
2702+const struct dma_map_ops *get_pci_dma_ops(void)
2703 {
2704 return pci_dma_ops;
2705 }
2706diff -urNp linux-2.6.32.45/arch/powerpc/kernel/process.c linux-2.6.32.45/arch/powerpc/kernel/process.c
2707--- linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2708+++ linux-2.6.32.45/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2709@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2710 * Lookup NIP late so we have the best change of getting the
2711 * above info out without failing
2712 */
2713- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2714- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2715+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2716+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2717 #endif
2718 show_stack(current, (unsigned long *) regs->gpr[1]);
2719 if (!user_mode(regs))
2720@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2721 newsp = stack[0];
2722 ip = stack[STACK_FRAME_LR_SAVE];
2723 if (!firstframe || ip != lr) {
2724- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2725+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2727 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2728- printk(" (%pS)",
2729+ printk(" (%pA)",
2730 (void *)current->ret_stack[curr_frame].ret);
2731 curr_frame--;
2732 }
2733@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2734 struct pt_regs *regs = (struct pt_regs *)
2735 (sp + STACK_FRAME_OVERHEAD);
2736 lr = regs->link;
2737- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2738+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2739 regs->trap, (void *)regs->nip, (void *)lr);
2740 firstframe = 1;
2741 }
2742@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2743 }
2744
2745 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2746-
2747-unsigned long arch_align_stack(unsigned long sp)
2748-{
2749- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2750- sp -= get_random_int() & ~PAGE_MASK;
2751- return sp & ~0xf;
2752-}
2753-
2754-static inline unsigned long brk_rnd(void)
2755-{
2756- unsigned long rnd = 0;
2757-
2758- /* 8MB for 32bit, 1GB for 64bit */
2759- if (is_32bit_task())
2760- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2761- else
2762- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2763-
2764- return rnd << PAGE_SHIFT;
2765-}
2766-
2767-unsigned long arch_randomize_brk(struct mm_struct *mm)
2768-{
2769- unsigned long base = mm->brk;
2770- unsigned long ret;
2771-
2772-#ifdef CONFIG_PPC_STD_MMU_64
2773- /*
2774- * If we are using 1TB segments and we are allowed to randomise
2775- * the heap, we can put it above 1TB so it is backed by a 1TB
2776- * segment. Otherwise the heap will be in the bottom 1TB
2777- * which always uses 256MB segments and this may result in a
2778- * performance penalty.
2779- */
2780- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2781- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2782-#endif
2783-
2784- ret = PAGE_ALIGN(base + brk_rnd());
2785-
2786- if (ret < mm->brk)
2787- return mm->brk;
2788-
2789- return ret;
2790-}
2791-
2792-unsigned long randomize_et_dyn(unsigned long base)
2793-{
2794- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2795-
2796- if (ret < base)
2797- return base;
2798-
2799- return ret;
2800-}
2801diff -urNp linux-2.6.32.45/arch/powerpc/kernel/ptrace.c linux-2.6.32.45/arch/powerpc/kernel/ptrace.c
2802--- linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
2803+++ linux-2.6.32.45/arch/powerpc/kernel/ptrace.c 2011-08-21 15:53:39.000000000 -0400
2804@@ -86,7 +86,7 @@ static int set_user_trap(struct task_str
2805 /*
2806 * Get contents of register REGNO in task TASK.
2807 */
2808-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
2809+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
2810 {
2811 if (task->thread.regs == NULL)
2812 return -EIO;
2813@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *chi
2814
2815 CHECK_FULL_REGS(child->thread.regs);
2816 if (index < PT_FPR0) {
2817- tmp = ptrace_get_reg(child, (int) index);
2818+ tmp = ptrace_get_reg(child, index);
2819 } else {
2820 flush_fp_to_thread(child);
2821 tmp = ((unsigned long *)child->thread.fpr)
2822diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_32.c linux-2.6.32.45/arch/powerpc/kernel/signal_32.c
2823--- linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.45/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2825@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2826 /* Save user registers on the stack */
2827 frame = &rt_sf->uc.uc_mcontext;
2828 addr = frame;
2829- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2830+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2831 if (save_user_regs(regs, frame, 0, 1))
2832 goto badframe;
2833 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2834diff -urNp linux-2.6.32.45/arch/powerpc/kernel/signal_64.c linux-2.6.32.45/arch/powerpc/kernel/signal_64.c
2835--- linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2836+++ linux-2.6.32.45/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2837@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2838 current->thread.fpscr.val = 0;
2839
2840 /* Set up to return from userspace. */
2841- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2842+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2843 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2844 } else {
2845 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2846diff -urNp linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c
2847--- linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2848+++ linux-2.6.32.45/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2849@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2850 if (oldlenp) {
2851 if (!error) {
2852 if (get_user(oldlen, oldlenp) ||
2853- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2854+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2855+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2856 error = -EFAULT;
2857 }
2858- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2859 }
2860 return error;
2861 }
2862diff -urNp linux-2.6.32.45/arch/powerpc/kernel/traps.c linux-2.6.32.45/arch/powerpc/kernel/traps.c
2863--- linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2864+++ linux-2.6.32.45/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2865@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2866 static inline void pmac_backlight_unblank(void) { }
2867 #endif
2868
2869+extern void gr_handle_kernel_exploit(void);
2870+
2871 int die(const char *str, struct pt_regs *regs, long err)
2872 {
2873 static struct {
2874@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2875 if (panic_on_oops)
2876 panic("Fatal exception");
2877
2878+ gr_handle_kernel_exploit();
2879+
2880 oops_exit();
2881 do_exit(err);
2882
2883diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vdso.c linux-2.6.32.45/arch/powerpc/kernel/vdso.c
2884--- linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2885+++ linux-2.6.32.45/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2886@@ -36,6 +36,7 @@
2887 #include <asm/firmware.h>
2888 #include <asm/vdso.h>
2889 #include <asm/vdso_datapage.h>
2890+#include <asm/mman.h>
2891
2892 #include "setup.h"
2893
2894@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2895 vdso_base = VDSO32_MBASE;
2896 #endif
2897
2898- current->mm->context.vdso_base = 0;
2899+ current->mm->context.vdso_base = ~0UL;
2900
2901 /* vDSO has a problem and was disabled, just don't "enable" it for the
2902 * process
2903@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2904 vdso_base = get_unmapped_area(NULL, vdso_base,
2905 (vdso_pages << PAGE_SHIFT) +
2906 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2907- 0, 0);
2908+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2909 if (IS_ERR_VALUE(vdso_base)) {
2910 rc = vdso_base;
2911 goto fail_mmapsem;
2912diff -urNp linux-2.6.32.45/arch/powerpc/kernel/vio.c linux-2.6.32.45/arch/powerpc/kernel/vio.c
2913--- linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2914+++ linux-2.6.32.45/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2915@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2916 vio_cmo_dealloc(viodev, alloc_size);
2917 }
2918
2919-struct dma_map_ops vio_dma_mapping_ops = {
2920+static const struct dma_map_ops vio_dma_mapping_ops = {
2921 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2922 .free_coherent = vio_dma_iommu_free_coherent,
2923 .map_sg = vio_dma_iommu_map_sg,
2924 .unmap_sg = vio_dma_iommu_unmap_sg,
2925+ .dma_supported = dma_iommu_dma_supported,
2926 .map_page = vio_dma_iommu_map_page,
2927 .unmap_page = vio_dma_iommu_unmap_page,
2928
2929@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2930
2931 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2932 {
2933- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2934 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2935 }
2936
2937diff -urNp linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c
2938--- linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2939+++ linux-2.6.32.45/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2940@@ -9,22 +9,6 @@
2941 #include <linux/module.h>
2942 #include <asm/uaccess.h>
2943
2944-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2945-{
2946- if (likely(access_ok(VERIFY_READ, from, n)))
2947- n = __copy_from_user(to, from, n);
2948- else
2949- memset(to, 0, n);
2950- return n;
2951-}
2952-
2953-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2954-{
2955- if (likely(access_ok(VERIFY_WRITE, to, n)))
2956- n = __copy_to_user(to, from, n);
2957- return n;
2958-}
2959-
2960 unsigned long copy_in_user(void __user *to, const void __user *from,
2961 unsigned long n)
2962 {
2963@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2964 return n;
2965 }
2966
2967-EXPORT_SYMBOL(copy_from_user);
2968-EXPORT_SYMBOL(copy_to_user);
2969 EXPORT_SYMBOL(copy_in_user);
2970
2971diff -urNp linux-2.6.32.45/arch/powerpc/Makefile linux-2.6.32.45/arch/powerpc/Makefile
2972--- linux-2.6.32.45/arch/powerpc/Makefile 2011-03-27 14:31:47.000000000 -0400
2973+++ linux-2.6.32.45/arch/powerpc/Makefile 2011-08-21 19:27:08.000000000 -0400
2974@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2975 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2976 CPP = $(CC) -E $(KBUILD_CFLAGS)
2977
2978+cflags-y += -Wno-sign-compare -Wno-extra
2979+
2980 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2981
2982 ifeq ($(CONFIG_PPC64),y)
2983diff -urNp linux-2.6.32.45/arch/powerpc/mm/fault.c linux-2.6.32.45/arch/powerpc/mm/fault.c
2984--- linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2985+++ linux-2.6.32.45/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2986@@ -30,6 +30,10 @@
2987 #include <linux/kprobes.h>
2988 #include <linux/kdebug.h>
2989 #include <linux/perf_event.h>
2990+#include <linux/slab.h>
2991+#include <linux/pagemap.h>
2992+#include <linux/compiler.h>
2993+#include <linux/unistd.h>
2994
2995 #include <asm/firmware.h>
2996 #include <asm/page.h>
2997@@ -40,6 +44,7 @@
2998 #include <asm/uaccess.h>
2999 #include <asm/tlbflush.h>
3000 #include <asm/siginfo.h>
3001+#include <asm/ptrace.h>
3002
3003
3004 #ifdef CONFIG_KPROBES
3005@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
3006 }
3007 #endif
3008
3009+#ifdef CONFIG_PAX_PAGEEXEC
3010+/*
3011+ * PaX: decide what to do with offenders (regs->nip = fault address)
3012+ *
3013+ * returns 1 when task should be killed
3014+ */
3015+static int pax_handle_fetch_fault(struct pt_regs *regs)
3016+{
3017+ return 1;
3018+}
3019+
3020+void pax_report_insns(void *pc, void *sp)
3021+{
3022+ unsigned long i;
3023+
3024+ printk(KERN_ERR "PAX: bytes at PC: ");
3025+ for (i = 0; i < 5; i++) {
3026+ unsigned int c;
3027+ if (get_user(c, (unsigned int __user *)pc+i))
3028+ printk(KERN_CONT "???????? ");
3029+ else
3030+ printk(KERN_CONT "%08x ", c);
3031+ }
3032+ printk("\n");
3033+}
3034+#endif
3035+
3036 /*
3037 * Check whether the instruction at regs->nip is a store using
3038 * an update addressing form which will update r1.
3039@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
3040 * indicate errors in DSISR but can validly be set in SRR1.
3041 */
3042 if (trap == 0x400)
3043- error_code &= 0x48200000;
3044+ error_code &= 0x58200000;
3045 else
3046 is_write = error_code & DSISR_ISSTORE;
3047 #else
3048@@ -250,7 +282,7 @@ good_area:
3049 * "undefined". Of those that can be set, this is the only
3050 * one which seems bad.
3051 */
3052- if (error_code & 0x10000000)
3053+ if (error_code & DSISR_GUARDED)
3054 /* Guarded storage error. */
3055 goto bad_area;
3056 #endif /* CONFIG_8xx */
3057@@ -265,7 +297,7 @@ good_area:
3058 * processors use the same I/D cache coherency mechanism
3059 * as embedded.
3060 */
3061- if (error_code & DSISR_PROTFAULT)
3062+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3063 goto bad_area;
3064 #endif /* CONFIG_PPC_STD_MMU */
3065
3066@@ -335,6 +367,23 @@ bad_area:
3067 bad_area_nosemaphore:
3068 /* User mode accesses cause a SIGSEGV */
3069 if (user_mode(regs)) {
3070+
3071+#ifdef CONFIG_PAX_PAGEEXEC
3072+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3073+#ifdef CONFIG_PPC_STD_MMU
3074+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3075+#else
3076+ if (is_exec && regs->nip == address) {
3077+#endif
3078+ switch (pax_handle_fetch_fault(regs)) {
3079+ }
3080+
3081+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3082+ do_group_exit(SIGKILL);
3083+ }
3084+ }
3085+#endif
3086+
3087 _exception(SIGSEGV, regs, code, address);
3088 return 0;
3089 }
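
The pax_handle_fetch_fault() and pax_report_insns() hooks added above implement the PAGEEXEC policy on powerpc: the offender is killed, and the five 32-bit words at the faulting PC are dumped with get_user(), printing "????????" for any word that cannot be read. A hedged userspace sketch of the same dump format, reading from an ordinary array of placeholder words rather than a faulting user PC:

#include <stdio.h>

/* print five 32-bit words starting at pc, in the "%08x " / "???????? "
 * style used by the pax_report_insns() added above */
static void report_insns(const unsigned int *pc)
{
	unsigned long i;

	printf("bytes at PC: ");
	for (i = 0; i < 5; i++)
		printf("%08x ", pc[i]);
	printf("\n");
}

int main(void)
{
	/* placeholder words standing in for the instructions at regs->nip */
	static const unsigned int fake_text[5] = {
		0x7c0802a6, 0x94210010, 0x38600000, 0x4e800020, 0x60000000
	};

	report_insns(fake_text);
	return 0;
}
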
3090diff -urNp linux-2.6.32.45/arch/powerpc/mm/mem.c linux-2.6.32.45/arch/powerpc/mm/mem.c
3091--- linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-03-27 14:31:47.000000000 -0400
3092+++ linux-2.6.32.45/arch/powerpc/mm/mem.c 2011-08-21 15:50:39.000000000 -0400
3093@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(voi
3094 {
3095 unsigned long lmb_next_region_start_pfn,
3096 lmb_region_max_pfn;
3097- int i;
3098+ unsigned int i;
3099
3100 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3101 lmb_region_max_pfn =
3102diff -urNp linux-2.6.32.45/arch/powerpc/mm/mmap_64.c linux-2.6.32.45/arch/powerpc/mm/mmap_64.c
3103--- linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3104+++ linux-2.6.32.45/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3105@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3106 */
3107 if (mmap_is_legacy()) {
3108 mm->mmap_base = TASK_UNMAPPED_BASE;
3109+
3110+#ifdef CONFIG_PAX_RANDMMAP
3111+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3112+ mm->mmap_base += mm->delta_mmap;
3113+#endif
3114+
3115 mm->get_unmapped_area = arch_get_unmapped_area;
3116 mm->unmap_area = arch_unmap_area;
3117 } else {
3118 mm->mmap_base = mmap_base();
3119+
3120+#ifdef CONFIG_PAX_RANDMMAP
3121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3122+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3123+#endif
3124+
3125 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3126 mm->unmap_area = arch_unmap_area_topdown;
3127 }
3128diff -urNp linux-2.6.32.45/arch/powerpc/mm/slice.c linux-2.6.32.45/arch/powerpc/mm/slice.c
3129--- linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3130+++ linux-2.6.32.45/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3131@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3132 if ((mm->task_size - len) < addr)
3133 return 0;
3134 vma = find_vma(mm, addr);
3135- return (!vma || (addr + len) <= vma->vm_start);
3136+ return check_heap_stack_gap(vma, addr, len);
3137 }
3138
3139 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3140@@ -256,7 +256,7 @@ full_search:
3141 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3142 continue;
3143 }
3144- if (!vma || addr + len <= vma->vm_start) {
3145+ if (check_heap_stack_gap(vma, addr, len)) {
3146 /*
3147 * Remember the place where we stopped the search:
3148 */
3149@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3150 }
3151 }
3152
3153- addr = mm->mmap_base;
3154- while (addr > len) {
3155+ if (mm->mmap_base < len)
3156+ addr = -ENOMEM;
3157+ else
3158+ addr = mm->mmap_base - len;
3159+
3160+ while (!IS_ERR_VALUE(addr)) {
3161 /* Go down by chunk size */
3162- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3163+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3164
3165 /* Check for hit with different page size */
3166 mask = slice_range_to_mask(addr, len);
3167@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3168 * return with success:
3169 */
3170 vma = find_vma(mm, addr);
3171- if (!vma || (addr + len) <= vma->vm_start) {
3172+ if (check_heap_stack_gap(vma, addr, len)) {
3173 /* remember the address as a hint for next time */
3174 if (use_cache)
3175 mm->free_area_cache = addr;
3176@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3177 mm->cached_hole_size = vma->vm_start - addr;
3178
3179 /* try just below the current vma->vm_start */
3180- addr = vma->vm_start;
3181+ addr = skip_heap_stack_gap(vma, len);
3182 }
3183
3184 /*
3185@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3186 if (fixed && addr > (mm->task_size - len))
3187 return -EINVAL;
3188
3189+#ifdef CONFIG_PAX_RANDMMAP
3190+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3191+ addr = 0;
3192+#endif
3193+
3194 /* If hint, make sure it matches our alignment restrictions */
3195 if (!fixed && addr) {
3196 addr = _ALIGN_UP(addr, 1ul << pshift);
3197diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c
3198--- linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3199+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3200@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3201 lite5200_pm_target_state = PM_SUSPEND_ON;
3202 }
3203
3204-static struct platform_suspend_ops lite5200_pm_ops = {
3205+static const struct platform_suspend_ops lite5200_pm_ops = {
3206 .valid = lite5200_pm_valid,
3207 .begin = lite5200_pm_begin,
3208 .prepare = lite5200_pm_prepare,
3209diff -urNp linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3210--- linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3211+++ linux-2.6.32.45/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3212@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3213 iounmap(mbar);
3214 }
3215
3216-static struct platform_suspend_ops mpc52xx_pm_ops = {
3217+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3218 .valid = mpc52xx_pm_valid,
3219 .prepare = mpc52xx_pm_prepare,
3220 .enter = mpc52xx_pm_enter,
3221diff -urNp linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c
3222--- linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3223+++ linux-2.6.32.45/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3224@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3225 return ret;
3226 }
3227
3228-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3229+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3230 .valid = mpc83xx_suspend_valid,
3231 .begin = mpc83xx_suspend_begin,
3232 .enter = mpc83xx_suspend_enter,
3233diff -urNp linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c
3234--- linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3235+++ linux-2.6.32.45/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3236@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3237
3238 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3239
3240-struct dma_map_ops dma_iommu_fixed_ops = {
3241+const struct dma_map_ops dma_iommu_fixed_ops = {
3242 .alloc_coherent = dma_fixed_alloc_coherent,
3243 .free_coherent = dma_fixed_free_coherent,
3244 .map_sg = dma_fixed_map_sg,
3245diff -urNp linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c
3246--- linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3247+++ linux-2.6.32.45/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3248@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3249 return mask >= DMA_BIT_MASK(32);
3250 }
3251
3252-static struct dma_map_ops ps3_sb_dma_ops = {
3253+static const struct dma_map_ops ps3_sb_dma_ops = {
3254 .alloc_coherent = ps3_alloc_coherent,
3255 .free_coherent = ps3_free_coherent,
3256 .map_sg = ps3_sb_map_sg,
3257@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3258 .unmap_page = ps3_unmap_page,
3259 };
3260
3261-static struct dma_map_ops ps3_ioc0_dma_ops = {
3262+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3263 .alloc_coherent = ps3_alloc_coherent,
3264 .free_coherent = ps3_free_coherent,
3265 .map_sg = ps3_ioc0_map_sg,
3266diff -urNp linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig
3267--- linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3268+++ linux-2.6.32.45/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3269@@ -2,6 +2,8 @@ config PPC_PSERIES
3270 depends on PPC64 && PPC_BOOK3S
3271 bool "IBM pSeries & new (POWER5-based) iSeries"
3272 select MPIC
3273+ select PCI_MSI
3274+ select XICS
3275 select PPC_I8259
3276 select PPC_RTAS
3277 select RTAS_ERROR_LOGGING
3278diff -urNp linux-2.6.32.45/arch/s390/include/asm/elf.h linux-2.6.32.45/arch/s390/include/asm/elf.h
3279--- linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3280+++ linux-2.6.32.45/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3281@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3282 that it will "exec", and that there is sufficient room for the brk. */
3283 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3284
3285+#ifdef CONFIG_PAX_ASLR
3286+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3287+
3288+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3289+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3290+#endif
3291+
3292 /* This yields a mask that user programs can use to figure out what
3293 instruction set this CPU supports. */
3294
3295diff -urNp linux-2.6.32.45/arch/s390/include/asm/setup.h linux-2.6.32.45/arch/s390/include/asm/setup.h
3296--- linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3297+++ linux-2.6.32.45/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3298@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3299 void detect_memory_layout(struct mem_chunk chunk[]);
3300
3301 #ifdef CONFIG_S390_SWITCH_AMODE
3302-extern unsigned int switch_amode;
3303+#define switch_amode (1)
3304 #else
3305 #define switch_amode (0)
3306 #endif
3307
3308 #ifdef CONFIG_S390_EXEC_PROTECT
3309-extern unsigned int s390_noexec;
3310+#define s390_noexec (1)
3311 #else
3312 #define s390_noexec (0)
3313 #endif
3314diff -urNp linux-2.6.32.45/arch/s390/include/asm/uaccess.h linux-2.6.32.45/arch/s390/include/asm/uaccess.h
3315--- linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3316+++ linux-2.6.32.45/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3317@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3318 copy_to_user(void __user *to, const void *from, unsigned long n)
3319 {
3320 might_fault();
3321+
3322+ if ((long)n < 0)
3323+ return n;
3324+
3325 if (access_ok(VERIFY_WRITE, to, n))
3326 n = __copy_to_user(to, from, n);
3327 return n;
3328@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3329 static inline unsigned long __must_check
3330 __copy_from_user(void *to, const void __user *from, unsigned long n)
3331 {
3332+ if ((long)n < 0)
3333+ return n;
3334+
3335 if (__builtin_constant_p(n) && (n <= 256))
3336 return uaccess.copy_from_user_small(n, from, to);
3337 else
3338@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3339 copy_from_user(void *to, const void __user *from, unsigned long n)
3340 {
3341 might_fault();
3342+
3343+ if ((long)n < 0)
3344+ return n;
3345+
3346 if (access_ok(VERIFY_READ, from, n))
3347 n = __copy_from_user(to, from, n);
3348 else
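
Each s390 copy helper above gains "if ((long)n < 0) return n;" ahead of the access_ok()/__copy_* call, so a length that started life as a negative signed integer, and became an enormous size_t on conversion, is rejected instead of copied. A small userspace demonstration of the failure mode this guards against (guarded_copy() is a made-up stand-in, not a kernel API):

#include <stdio.h>
#include <string.h>

/* guarded_copy(): mirrors the added check -- a size that is negative when
 * viewed as long almost certainly came from a sign-extended negative int,
 * so refuse it and report every byte as uncopied */
static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)
		return n;
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[16] = "hello", dst[16];
	int broken_len = -1;		/* e.g. an error code used as a length */

	/* the implicit conversion turns -1 into 0xffff...ffff */
	unsigned long left = guarded_copy(dst, src, (unsigned long)broken_len);

	printf("bytes not copied: %#lx\n", left);
	return 0;
}
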
3349diff -urNp linux-2.6.32.45/arch/s390/Kconfig linux-2.6.32.45/arch/s390/Kconfig
3350--- linux-2.6.32.45/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3351+++ linux-2.6.32.45/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3352@@ -194,28 +194,26 @@ config AUDIT_ARCH
3353
3354 config S390_SWITCH_AMODE
3355 bool "Switch kernel/user addressing modes"
3356+ default y
3357 help
3358 This option allows to switch the addressing modes of kernel and user
3359- space. The kernel parameter switch_amode=on will enable this feature,
3360- default is disabled. Enabling this (via kernel parameter) on machines
3361- earlier than IBM System z9-109 EC/BC will reduce system performance.
3362+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3363+ will reduce system performance.
3364
3365 Note that this option will also be selected by selecting the execute
3366- protection option below. Enabling the execute protection via the
3367- noexec kernel parameter will also switch the addressing modes,
3368- independent of the switch_amode kernel parameter.
3369+ protection option below. Enabling the execute protection will also
3370+ switch the addressing modes, independent of this option.
3371
3372
3373 config S390_EXEC_PROTECT
3374 bool "Data execute protection"
3375+ default y
3376 select S390_SWITCH_AMODE
3377 help
3378 This option allows to enable a buffer overflow protection for user
3379 space programs and it also selects the addressing mode option above.
3380- The kernel parameter noexec=on will enable this feature and also
3381- switch the addressing modes, default is disabled. Enabling this (via
3382- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3383- will reduce system performance.
3384+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3385+ reduce system performance.
3386
3387 comment "Code generation options"
3388
3389diff -urNp linux-2.6.32.45/arch/s390/kernel/module.c linux-2.6.32.45/arch/s390/kernel/module.c
3390--- linux-2.6.32.45/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3391+++ linux-2.6.32.45/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3392@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3393
3394 /* Increase core size by size of got & plt and set start
3395 offsets for got and plt. */
3396- me->core_size = ALIGN(me->core_size, 4);
3397- me->arch.got_offset = me->core_size;
3398- me->core_size += me->arch.got_size;
3399- me->arch.plt_offset = me->core_size;
3400- me->core_size += me->arch.plt_size;
3401+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3402+ me->arch.got_offset = me->core_size_rw;
3403+ me->core_size_rw += me->arch.got_size;
3404+ me->arch.plt_offset = me->core_size_rx;
3405+ me->core_size_rx += me->arch.plt_size;
3406 return 0;
3407 }
3408
3409@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3410 if (info->got_initialized == 0) {
3411 Elf_Addr *gotent;
3412
3413- gotent = me->module_core + me->arch.got_offset +
3414+ gotent = me->module_core_rw + me->arch.got_offset +
3415 info->got_offset;
3416 *gotent = val;
3417 info->got_initialized = 1;
3418@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3419 else if (r_type == R_390_GOTENT ||
3420 r_type == R_390_GOTPLTENT)
3421 *(unsigned int *) loc =
3422- (val + (Elf_Addr) me->module_core - loc) >> 1;
3423+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3424 else if (r_type == R_390_GOT64 ||
3425 r_type == R_390_GOTPLT64)
3426 *(unsigned long *) loc = val;
3427@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3428 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3429 if (info->plt_initialized == 0) {
3430 unsigned int *ip;
3431- ip = me->module_core + me->arch.plt_offset +
3432+ ip = me->module_core_rx + me->arch.plt_offset +
3433 info->plt_offset;
3434 #ifndef CONFIG_64BIT
3435 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3436@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3437 val - loc + 0xffffUL < 0x1ffffeUL) ||
3438 (r_type == R_390_PLT32DBL &&
3439 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3440- val = (Elf_Addr) me->module_core +
3441+ val = (Elf_Addr) me->module_core_rx +
3442 me->arch.plt_offset +
3443 info->plt_offset;
3444 val += rela->r_addend - loc;
3445@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3446 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3447 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3448 val = val + rela->r_addend -
3449- ((Elf_Addr) me->module_core + me->arch.got_offset);
3450+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3451 if (r_type == R_390_GOTOFF16)
3452 *(unsigned short *) loc = val;
3453 else if (r_type == R_390_GOTOFF32)
3454@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3455 break;
3456 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3457 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3458- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3459+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3460 rela->r_addend - loc;
3461 if (r_type == R_390_GOTPC)
3462 *(unsigned int *) loc = val;
3463diff -urNp linux-2.6.32.45/arch/s390/kernel/setup.c linux-2.6.32.45/arch/s390/kernel/setup.c
3464--- linux-2.6.32.45/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3465+++ linux-2.6.32.45/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3466@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3467 early_param("mem", early_parse_mem);
3468
3469 #ifdef CONFIG_S390_SWITCH_AMODE
3470-unsigned int switch_amode = 0;
3471-EXPORT_SYMBOL_GPL(switch_amode);
3472-
3473 static int set_amode_and_uaccess(unsigned long user_amode,
3474 unsigned long user32_amode)
3475 {
3476@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3477 return 0;
3478 }
3479 }
3480-
3481-/*
3482- * Switch kernel/user addressing modes?
3483- */
3484-static int __init early_parse_switch_amode(char *p)
3485-{
3486- switch_amode = 1;
3487- return 0;
3488-}
3489-early_param("switch_amode", early_parse_switch_amode);
3490-
3491 #else /* CONFIG_S390_SWITCH_AMODE */
3492 static inline int set_amode_and_uaccess(unsigned long user_amode,
3493 unsigned long user32_amode)
3494@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3495 }
3496 #endif /* CONFIG_S390_SWITCH_AMODE */
3497
3498-#ifdef CONFIG_S390_EXEC_PROTECT
3499-unsigned int s390_noexec = 0;
3500-EXPORT_SYMBOL_GPL(s390_noexec);
3501-
3502-/*
3503- * Enable execute protection?
3504- */
3505-static int __init early_parse_noexec(char *p)
3506-{
3507- if (!strncmp(p, "off", 3))
3508- return 0;
3509- switch_amode = 1;
3510- s390_noexec = 1;
3511- return 0;
3512-}
3513-early_param("noexec", early_parse_noexec);
3514-#endif /* CONFIG_S390_EXEC_PROTECT */
3515-
3516 static void setup_addressing_mode(void)
3517 {
3518 if (s390_noexec) {
3519diff -urNp linux-2.6.32.45/arch/s390/mm/mmap.c linux-2.6.32.45/arch/s390/mm/mmap.c
3520--- linux-2.6.32.45/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3521+++ linux-2.6.32.45/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3522@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3523 */
3524 if (mmap_is_legacy()) {
3525 mm->mmap_base = TASK_UNMAPPED_BASE;
3526+
3527+#ifdef CONFIG_PAX_RANDMMAP
3528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3529+ mm->mmap_base += mm->delta_mmap;
3530+#endif
3531+
3532 mm->get_unmapped_area = arch_get_unmapped_area;
3533 mm->unmap_area = arch_unmap_area;
3534 } else {
3535 mm->mmap_base = mmap_base();
3536+
3537+#ifdef CONFIG_PAX_RANDMMAP
3538+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3539+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3540+#endif
3541+
3542 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3543 mm->unmap_area = arch_unmap_area_topdown;
3544 }
3545@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3546 */
3547 if (mmap_is_legacy()) {
3548 mm->mmap_base = TASK_UNMAPPED_BASE;
3549+
3550+#ifdef CONFIG_PAX_RANDMMAP
3551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3552+ mm->mmap_base += mm->delta_mmap;
3553+#endif
3554+
3555 mm->get_unmapped_area = s390_get_unmapped_area;
3556 mm->unmap_area = arch_unmap_area;
3557 } else {
3558 mm->mmap_base = mmap_base();
3559+
3560+#ifdef CONFIG_PAX_RANDMMAP
3561+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3562+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3563+#endif
3564+
3565 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3566 mm->unmap_area = arch_unmap_area_topdown;
3567 }
3568diff -urNp linux-2.6.32.45/arch/score/include/asm/system.h linux-2.6.32.45/arch/score/include/asm/system.h
3569--- linux-2.6.32.45/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3570+++ linux-2.6.32.45/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3571@@ -17,7 +17,7 @@ do { \
3572 #define finish_arch_switch(prev) do {} while (0)
3573
3574 typedef void (*vi_handler_t)(void);
3575-extern unsigned long arch_align_stack(unsigned long sp);
3576+#define arch_align_stack(x) (x)
3577
3578 #define mb() barrier()
3579 #define rmb() barrier()
3580diff -urNp linux-2.6.32.45/arch/score/kernel/process.c linux-2.6.32.45/arch/score/kernel/process.c
3581--- linux-2.6.32.45/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3582+++ linux-2.6.32.45/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3583@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3584
3585 return task_pt_regs(task)->cp0_epc;
3586 }
3587-
3588-unsigned long arch_align_stack(unsigned long sp)
3589-{
3590- return sp;
3591-}
3592diff -urNp linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c
3593--- linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3594+++ linux-2.6.32.45/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3595@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3596 return 0;
3597 }
3598
3599-static struct platform_suspend_ops hp6x0_pm_ops = {
3600+static const struct platform_suspend_ops hp6x0_pm_ops = {
3601 .enter = hp6x0_pm_enter,
3602 .valid = suspend_valid_only_mem,
3603 };
3604diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c
3605--- linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3606+++ linux-2.6.32.45/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3607@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3608 NULL,
3609 };
3610
3611-static struct sysfs_ops sq_sysfs_ops = {
3612+static const struct sysfs_ops sq_sysfs_ops = {
3613 .show = sq_sysfs_show,
3614 .store = sq_sysfs_store,
3615 };
3616diff -urNp linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c
3617--- linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3618+++ linux-2.6.32.45/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3619@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3620 return 0;
3621 }
3622
3623-static struct platform_suspend_ops sh_pm_ops = {
3624+static const struct platform_suspend_ops sh_pm_ops = {
3625 .enter = sh_pm_enter,
3626 .valid = suspend_valid_only_mem,
3627 };
3628diff -urNp linux-2.6.32.45/arch/sh/kernel/kgdb.c linux-2.6.32.45/arch/sh/kernel/kgdb.c
3629--- linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3630+++ linux-2.6.32.45/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3631@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3632 {
3633 }
3634
3635-struct kgdb_arch arch_kgdb_ops = {
3636+const struct kgdb_arch arch_kgdb_ops = {
3637 /* Breakpoint instruction: trapa #0x3c */
3638 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3639 .gdb_bpt_instr = { 0x3c, 0xc3 },
3640diff -urNp linux-2.6.32.45/arch/sh/mm/mmap.c linux-2.6.32.45/arch/sh/mm/mmap.c
3641--- linux-2.6.32.45/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3642+++ linux-2.6.32.45/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3643@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3644 addr = PAGE_ALIGN(addr);
3645
3646 vma = find_vma(mm, addr);
3647- if (TASK_SIZE - len >= addr &&
3648- (!vma || addr + len <= vma->vm_start))
3649+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3650 return addr;
3651 }
3652
3653@@ -106,7 +105,7 @@ full_search:
3654 }
3655 return -ENOMEM;
3656 }
3657- if (likely(!vma || addr + len <= vma->vm_start)) {
3658+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3659 /*
3660 * Remember the place where we stopped the search:
3661 */
3662@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3663 addr = PAGE_ALIGN(addr);
3664
3665 vma = find_vma(mm, addr);
3666- if (TASK_SIZE - len >= addr &&
3667- (!vma || addr + len <= vma->vm_start))
3668+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3669 return addr;
3670 }
3671
3672@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3673 /* make sure it can fit in the remaining address space */
3674 if (likely(addr > len)) {
3675 vma = find_vma(mm, addr-len);
3676- if (!vma || addr <= vma->vm_start) {
3677+ if (check_heap_stack_gap(vma, addr - len, len)) {
3678 /* remember the address as a hint for next time */
3679 return (mm->free_area_cache = addr-len);
3680 }
3681@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3682 if (unlikely(mm->mmap_base < len))
3683 goto bottomup;
3684
3685- addr = mm->mmap_base-len;
3686- if (do_colour_align)
3687- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3688+ addr = mm->mmap_base - len;
3689
3690 do {
3691+ if (do_colour_align)
3692+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3693 /*
3694 * Lookup failure means no vma is above this address,
3695 * else if new region fits below vma->vm_start,
3696 * return with success:
3697 */
3698 vma = find_vma(mm, addr);
3699- if (likely(!vma || addr+len <= vma->vm_start)) {
3700+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3701 /* remember the address as a hint for next time */
3702 return (mm->free_area_cache = addr);
3703 }
3704@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3705 mm->cached_hole_size = vma->vm_start - addr;
3706
3707 /* try just below the current vma->vm_start */
3708- addr = vma->vm_start-len;
3709- if (do_colour_align)
3710- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3711- } while (likely(len < vma->vm_start));
3712+ addr = skip_heap_stack_gap(vma, len);
3713+ } while (!IS_ERR_VALUE(addr));
3714
3715 bottomup:
3716 /*
3717diff -urNp linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h
3718--- linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3719+++ linux-2.6.32.45/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:11:34.000000000 -0400
3720@@ -14,18 +14,40 @@
3721 #define ATOMIC64_INIT(i) { (i) }
3722
3723 #define atomic_read(v) ((v)->counter)
3724+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3725+{
3726+ return v->counter;
3727+}
3728 #define atomic64_read(v) ((v)->counter)
3729+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3730+{
3731+ return v->counter;
3732+}
3733
3734 #define atomic_set(v, i) (((v)->counter) = i)
3735+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3736+{
3737+ v->counter = i;
3738+}
3739 #define atomic64_set(v, i) (((v)->counter) = i)
3740+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3741+{
3742+ v->counter = i;
3743+}
3744
3745 extern void atomic_add(int, atomic_t *);
3746+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3747 extern void atomic64_add(long, atomic64_t *);
3748+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3749 extern void atomic_sub(int, atomic_t *);
3750+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3751 extern void atomic64_sub(long, atomic64_t *);
3752+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3753
3754 extern int atomic_add_ret(int, atomic_t *);
3755+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3756 extern long atomic64_add_ret(long, atomic64_t *);
3757+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3758 extern int atomic_sub_ret(int, atomic_t *);
3759 extern long atomic64_sub_ret(long, atomic64_t *);
3760
3761@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3762 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3763
3764 #define atomic_inc_return(v) atomic_add_ret(1, v)
3765+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3766+{
3767+ return atomic_add_ret_unchecked(1, v);
3768+}
3769 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3770+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3771+{
3772+ return atomic64_add_ret_unchecked(1, v);
3773+}
3774
3775 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3776 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3777
3778 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3779+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3780+{
3781+ return atomic_add_ret_unchecked(i, v);
3782+}
3783 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3784+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3785+{
3786+ return atomic64_add_ret_unchecked(i, v);
3787+}
3788
3789 /*
3790 * atomic_inc_and_test - increment and test
3791@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
3792 * other cases.
3793 */
3794 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3795+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3796+{
3797+ return atomic_inc_return_unchecked(v) == 0;
3798+}
3799 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3800
3801 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3802@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
3803 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3804
3805 #define atomic_inc(v) atomic_add(1, v)
3806+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3807+{
3808+ atomic_add_unchecked(1, v);
3809+}
3810 #define atomic64_inc(v) atomic64_add(1, v)
3811+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3812+{
3813+ atomic64_add_unchecked(1, v);
3814+}
3815
3816 #define atomic_dec(v) atomic_sub(1, v)
3817+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3818+{
3819+ atomic_sub_unchecked(1, v);
3820+}
3821 #define atomic64_dec(v) atomic64_sub(1, v)
3822+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3823+{
3824+ atomic64_sub_unchecked(1, v);
3825+}
3826
3827 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3828 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3829
3830 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3831+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3832+{
3833+ return cmpxchg(&v->counter, old, new);
3834+}
3835 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3836+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3837+{
3838+ return xchg(&v->counter, new);
3839+}
3840
3841 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3842 {
3843- int c, old;
3844+ int c, old, new;
3845 c = atomic_read(v);
3846 for (;;) {
3847- if (unlikely(c == (u)))
3848+ if (unlikely(c == u))
3849 break;
3850- old = atomic_cmpxchg((v), c, c + (a));
3851+
3852+ asm volatile("addcc %2, %0, %0\n"
3853+
3854+#ifdef CONFIG_PAX_REFCOUNT
3855+ "tvs %%icc, 6\n"
3856+#endif
3857+
3858+ : "=r" (new)
3859+ : "0" (c), "ir" (a)
3860+ : "cc");
3861+
3862+ old = atomic_cmpxchg(v, c, new);
3863 if (likely(old == c))
3864 break;
3865 c = old;
3866 }
3867- return c != (u);
3868+ return c != u;
3869 }
3870
3871 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3872@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3873 #define atomic64_cmpxchg(v, o, n) \
3874 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3875 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3876+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3877+{
3878+ return xchg(&v->counter, new);
3879+}
3880
3881 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3882 {
3883- long c, old;
3884+ long c, old, new;
3885 c = atomic64_read(v);
3886 for (;;) {
3887- if (unlikely(c == (u)))
3888+ if (unlikely(c == u))
3889 break;
3890- old = atomic64_cmpxchg((v), c, c + (a));
3891+
3892+ asm volatile("addcc %2, %0, %0\n"
3893+
3894+#ifdef CONFIG_PAX_REFCOUNT
3895+ "tvs %%xcc, 6\n"
3896+#endif
3897+
3898+ : "=r" (new)
3899+ : "0" (c), "ir" (a)
3900+ : "cc");
3901+
3902+ old = atomic64_cmpxchg(v, c, new);
3903 if (likely(old == c))
3904 break;
3905 c = old;
3906 }
3907- return c != (u);
3908+ return c != u;
3909 }
3910
3911 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
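
The atomic_64.h hunk above splits every primitive into a checked form, whose addcc/"tvs %icc, 6" sequence traps on signed overflow when CONFIG_PAX_REFCOUNT is enabled, and an *_unchecked form that keeps the old wrapping behaviour for counters that may legitimately overflow. A portable, non-atomic sketch of that split using the compiler's overflow builtin; it models only the overflow policy, not the SMP atomicity or the actual trap:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked add: refuse to wrap, mirroring the addcc + tvs trap added
 * under CONFIG_PAX_REFCOUNT (abort() stands in for the kernel trap) */
static int add_return_checked(int i, int *counter)
{
	int new_val;

	if (__builtin_add_overflow(*counter, i, &new_val))
		abort();
	*counter = new_val;
	return new_val;
}

/* unchecked add: the old wrapping behaviour, kept in the patch as the
 * *_unchecked variants; done in unsigned arithmetic to avoid C-level UB */
static int add_return_unchecked(int i, int *counter)
{
	*counter = (int)((unsigned int)*counter + (unsigned int)i);
	return *counter;
}

int main(void)
{
	int refcount = INT_MAX - 1;	/* e.g. an object reference count */
	int stat = INT_MAX;		/* e.g. a statistics counter */

	printf("checked:   %d\n", add_return_checked(1, &refcount));
	printf("unchecked: %d\n", add_return_unchecked(1, &stat));	/* wraps */
	add_return_checked(1, &refcount);	/* would overflow: aborts */
	printf("never reached\n");
	return 0;
}
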
3912diff -urNp linux-2.6.32.45/arch/sparc/include/asm/cache.h linux-2.6.32.45/arch/sparc/include/asm/cache.h
3913--- linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3914+++ linux-2.6.32.45/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3915@@ -8,7 +8,7 @@
3916 #define _SPARC_CACHE_H
3917
3918 #define L1_CACHE_SHIFT 5
3919-#define L1_CACHE_BYTES 32
3920+#define L1_CACHE_BYTES 32UL
3921 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3922
3923 #ifdef CONFIG_SPARC32
3924diff -urNp linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h
3925--- linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3926+++ linux-2.6.32.45/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3927@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3928 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3929 #define dma_is_consistent(d, h) (1)
3930
3931-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3932+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3933 extern struct bus_type pci_bus_type;
3934
3935-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3936+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3937 {
3938 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3939 if (dev->bus == &pci_bus_type)
3940@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3941 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3942 dma_addr_t *dma_handle, gfp_t flag)
3943 {
3944- struct dma_map_ops *ops = get_dma_ops(dev);
3945+ const struct dma_map_ops *ops = get_dma_ops(dev);
3946 void *cpu_addr;
3947
3948 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3949@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3950 static inline void dma_free_coherent(struct device *dev, size_t size,
3951 void *cpu_addr, dma_addr_t dma_handle)
3952 {
3953- struct dma_map_ops *ops = get_dma_ops(dev);
3954+ const struct dma_map_ops *ops = get_dma_ops(dev);
3955
3956 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3957 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3958diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_32.h linux-2.6.32.45/arch/sparc/include/asm/elf_32.h
3959--- linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3960+++ linux-2.6.32.45/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3961@@ -116,6 +116,13 @@ typedef struct {
3962
3963 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3964
3965+#ifdef CONFIG_PAX_ASLR
3966+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3967+
3968+#define PAX_DELTA_MMAP_LEN 16
3969+#define PAX_DELTA_STACK_LEN 16
3970+#endif
3971+
3972 /* This yields a mask that user programs can use to figure out what
3973 instruction set this cpu supports. This can NOT be done in userspace
3974 on Sparc. */
3975diff -urNp linux-2.6.32.45/arch/sparc/include/asm/elf_64.h linux-2.6.32.45/arch/sparc/include/asm/elf_64.h
3976--- linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3977+++ linux-2.6.32.45/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3978@@ -163,6 +163,12 @@ typedef struct {
3979 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3980 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3981
3982+#ifdef CONFIG_PAX_ASLR
3983+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3984+
3985+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3986+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3987+#endif
3988
3989 /* This yields a mask that user programs can use to figure out what
3990 instruction set this cpu supports. */
3991diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h
3992--- linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3993+++ linux-2.6.32.45/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3994@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3995 BTFIXUPDEF_INT(page_none)
3996 BTFIXUPDEF_INT(page_copy)
3997 BTFIXUPDEF_INT(page_readonly)
3998+
3999+#ifdef CONFIG_PAX_PAGEEXEC
4000+BTFIXUPDEF_INT(page_shared_noexec)
4001+BTFIXUPDEF_INT(page_copy_noexec)
4002+BTFIXUPDEF_INT(page_readonly_noexec)
4003+#endif
4004+
4005 BTFIXUPDEF_INT(page_kernel)
4006
4007 #define PMD_SHIFT SUN4C_PMD_SHIFT
4008@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4009 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4010 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4011
4012+#ifdef CONFIG_PAX_PAGEEXEC
4013+extern pgprot_t PAGE_SHARED_NOEXEC;
4014+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4015+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4016+#else
4017+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4018+# define PAGE_COPY_NOEXEC PAGE_COPY
4019+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4020+#endif
4021+
4022 extern unsigned long page_kernel;
4023
4024 #ifdef MODULE
4025diff -urNp linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h
4026--- linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
4027+++ linux-2.6.32.45/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
4028@@ -115,6 +115,13 @@
4029 SRMMU_EXEC | SRMMU_REF)
4030 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4031 SRMMU_EXEC | SRMMU_REF)
4032+
4033+#ifdef CONFIG_PAX_PAGEEXEC
4034+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4035+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4036+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4037+#endif
4038+
4039 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4040 SRMMU_DIRTY | SRMMU_REF)
4041
4042diff -urNp linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h
4043--- linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
4044+++ linux-2.6.32.45/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:19:30.000000000 -0400
4045@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
4046
4047 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4048
4049-static void inline arch_read_lock(raw_rwlock_t *lock)
4050+static inline void arch_read_lock(raw_rwlock_t *lock)
4051 {
4052 unsigned long tmp1, tmp2;
4053
4054 __asm__ __volatile__ (
4055 "1: ldsw [%2], %0\n"
4056 " brlz,pn %0, 2f\n"
4057-"4: add %0, 1, %1\n"
4058+"4: addcc %0, 1, %1\n"
4059+
4060+#ifdef CONFIG_PAX_REFCOUNT
4061+" tvs %%icc, 6\n"
4062+#endif
4063+
4064 " cas [%2], %0, %1\n"
4065 " cmp %0, %1\n"
4066 " bne,pn %%icc, 1b\n"
4067@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw
4068 " .previous"
4069 : "=&r" (tmp1), "=&r" (tmp2)
4070 : "r" (lock)
4071- : "memory");
4072+ : "memory", "cc");
4073 }
4074
4075-static int inline arch_read_trylock(raw_rwlock_t *lock)
4076+static inline int arch_read_trylock(raw_rwlock_t *lock)
4077 {
4078 int tmp1, tmp2;
4079
4080@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
4081 "1: ldsw [%2], %0\n"
4082 " brlz,a,pn %0, 2f\n"
4083 " mov 0, %0\n"
4084-" add %0, 1, %1\n"
4085+" addcc %0, 1, %1\n"
4086+
4087+#ifdef CONFIG_PAX_REFCOUNT
4088+" tvs %%icc, 6\n"
4089+#endif
4090+
4091 " cas [%2], %0, %1\n"
4092 " cmp %0, %1\n"
4093 " bne,pn %%icc, 1b\n"
4094@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4095 return tmp1;
4096 }
4097
4098-static void inline arch_read_unlock(raw_rwlock_t *lock)
4099+static inline void arch_read_unlock(raw_rwlock_t *lock)
4100 {
4101 unsigned long tmp1, tmp2;
4102
4103 __asm__ __volatile__(
4104 "1: lduw [%2], %0\n"
4105-" sub %0, 1, %1\n"
4106+" subcc %0, 1, %1\n"
4107+
4108+#ifdef CONFIG_PAX_REFCOUNT
4109+" tvs %%icc, 6\n"
4110+#endif
4111+
4112 " cas [%2], %0, %1\n"
4113 " cmp %0, %1\n"
4114 " bne,pn %%xcc, 1b\n"
4115@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4116 : "memory");
4117 }
4118
4119-static void inline arch_write_lock(raw_rwlock_t *lock)
4120+static inline void arch_write_lock(raw_rwlock_t *lock)
4121 {
4122 unsigned long mask, tmp1, tmp2;
4123
4124@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4125 : "memory");
4126 }
4127
4128-static void inline arch_write_unlock(raw_rwlock_t *lock)
4129+static inline void arch_write_unlock(raw_rwlock_t *lock)
4130 {
4131 __asm__ __volatile__(
4132 " stw %%g0, [%0]"
4133@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw
4134 : "memory");
4135 }
4136
4137-static int inline arch_write_trylock(raw_rwlock_t *lock)
4138+static inline int arch_write_trylock(raw_rwlock_t *lock)
4139 {
4140 unsigned long mask, tmp1, tmp2, result;
4141
4142diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h
4143--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4144+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4145@@ -50,6 +50,8 @@ struct thread_info {
4146 unsigned long w_saved;
4147
4148 struct restart_block restart_block;
4149+
4150+ unsigned long lowest_stack;
4151 };
4152
4153 /*
4154diff -urNp linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h
4155--- linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4156+++ linux-2.6.32.45/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4157@@ -68,6 +68,8 @@ struct thread_info {
4158 struct pt_regs *kern_una_regs;
4159 unsigned int kern_una_insn;
4160
4161+ unsigned long lowest_stack;
4162+
4163 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4164 };
4165
4166diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h
4167--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4168+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4169@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4170
4171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4172 {
4173- if (n && __access_ok((unsigned long) to, n))
4174+ if ((long)n < 0)
4175+ return n;
4176+
4177+ if (n && __access_ok((unsigned long) to, n)) {
4178+ if (!__builtin_constant_p(n))
4179+ check_object_size(from, n, true);
4180 return __copy_user(to, (__force void __user *) from, n);
4181- else
4182+ } else
4183 return n;
4184 }
4185
4186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4187 {
4188+ if ((long)n < 0)
4189+ return n;
4190+
4191+ if (!__builtin_constant_p(n))
4192+ check_object_size(from, n, true);
4193+
4194 return __copy_user(to, (__force void __user *) from, n);
4195 }
4196
4197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4198 {
4199- if (n && __access_ok((unsigned long) from, n))
4200+ if ((long)n < 0)
4201+ return n;
4202+
4203+ if (n && __access_ok((unsigned long) from, n)) {
4204+ if (!__builtin_constant_p(n))
4205+ check_object_size(to, n, false);
4206 return __copy_user((__force void __user *) to, from, n);
4207- else
4208+ } else
4209 return n;
4210 }
4211
4212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4213 {
4214+ if ((long)n < 0)
4215+ return n;
4216+
4217 return __copy_user((__force void __user *) to, from, n);
4218 }
4219
4220diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h
4221--- linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4222+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4223@@ -9,6 +9,7 @@
4224 #include <linux/compiler.h>
4225 #include <linux/string.h>
4226 #include <linux/thread_info.h>
4227+#include <linux/kernel.h>
4228 #include <asm/asi.h>
4229 #include <asm/system.h>
4230 #include <asm/spitfire.h>
4231@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4232 static inline unsigned long __must_check
4233 copy_from_user(void *to, const void __user *from, unsigned long size)
4234 {
4235- unsigned long ret = ___copy_from_user(to, from, size);
4236+ unsigned long ret;
4237
4238+ if ((long)size < 0 || size > INT_MAX)
4239+ return size;
4240+
4241+ if (!__builtin_constant_p(size))
4242+ check_object_size(to, size, false);
4243+
4244+ ret = ___copy_from_user(to, from, size);
4245 if (unlikely(ret))
4246 ret = copy_from_user_fixup(to, from, size);
4247 return ret;
4248@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4249 static inline unsigned long __must_check
4250 copy_to_user(void __user *to, const void *from, unsigned long size)
4251 {
4252- unsigned long ret = ___copy_to_user(to, from, size);
4253+ unsigned long ret;
4254+
4255+ if ((long)size < 0 || size > INT_MAX)
4256+ return size;
4257+
4258+ if (!__builtin_constant_p(size))
4259+ check_object_size(from, size, true);
4260
4261+ ret = ___copy_to_user(to, from, size);
4262 if (unlikely(ret))
4263 ret = copy_to_user_fixup(to, from, size);
4264 return ret;
4265diff -urNp linux-2.6.32.45/arch/sparc/include/asm/uaccess.h linux-2.6.32.45/arch/sparc/include/asm/uaccess.h
4266--- linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4267+++ linux-2.6.32.45/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4268@@ -1,5 +1,13 @@
4269 #ifndef ___ASM_SPARC_UACCESS_H
4270 #define ___ASM_SPARC_UACCESS_H
4271+
4272+#ifdef __KERNEL__
4273+#ifndef __ASSEMBLY__
4274+#include <linux/types.h>
4275+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4276+#endif
4277+#endif
4278+
4279 #if defined(__sparc__) && defined(__arch64__)
4280 #include <asm/uaccess_64.h>
4281 #else
4282diff -urNp linux-2.6.32.45/arch/sparc/kernel/iommu.c linux-2.6.32.45/arch/sparc/kernel/iommu.c
4283--- linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4284+++ linux-2.6.32.45/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4285@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4286 spin_unlock_irqrestore(&iommu->lock, flags);
4287 }
4288
4289-static struct dma_map_ops sun4u_dma_ops = {
4290+static const struct dma_map_ops sun4u_dma_ops = {
4291 .alloc_coherent = dma_4u_alloc_coherent,
4292 .free_coherent = dma_4u_free_coherent,
4293 .map_page = dma_4u_map_page,
4294@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4295 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4296 };
4297
4298-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4299+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4300 EXPORT_SYMBOL(dma_ops);
4301
4302 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4303diff -urNp linux-2.6.32.45/arch/sparc/kernel/ioport.c linux-2.6.32.45/arch/sparc/kernel/ioport.c
4304--- linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4305+++ linux-2.6.32.45/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4306@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4307 BUG();
4308 }
4309
4310-struct dma_map_ops sbus_dma_ops = {
4311+const struct dma_map_ops sbus_dma_ops = {
4312 .alloc_coherent = sbus_alloc_coherent,
4313 .free_coherent = sbus_free_coherent,
4314 .map_page = sbus_map_page,
4315@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4316 .sync_sg_for_device = sbus_sync_sg_for_device,
4317 };
4318
4319-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4320+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4321 EXPORT_SYMBOL(dma_ops);
4322
4323 static int __init sparc_register_ioport(void)
4324@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4325 }
4326 }
4327
4328-struct dma_map_ops pci32_dma_ops = {
4329+const struct dma_map_ops pci32_dma_ops = {
4330 .alloc_coherent = pci32_alloc_coherent,
4331 .free_coherent = pci32_free_coherent,
4332 .map_page = pci32_map_page,
4333diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c
4334--- linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4335+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4336@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4337 {
4338 }
4339
4340-struct kgdb_arch arch_kgdb_ops = {
4341+const struct kgdb_arch arch_kgdb_ops = {
4342 /* Breakpoint instruction: ta 0x7d */
4343 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4344 };
4345diff -urNp linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c
4346--- linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4347+++ linux-2.6.32.45/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4348@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4349 {
4350 }
4351
4352-struct kgdb_arch arch_kgdb_ops = {
4353+const struct kgdb_arch arch_kgdb_ops = {
4354 /* Breakpoint instruction: ta 0x72 */
4355 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4356 };
4357diff -urNp linux-2.6.32.45/arch/sparc/kernel/Makefile linux-2.6.32.45/arch/sparc/kernel/Makefile
4358--- linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4359+++ linux-2.6.32.45/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4360@@ -3,7 +3,7 @@
4361 #
4362
4363 asflags-y := -ansi
4364-ccflags-y := -Werror
4365+#ccflags-y := -Werror
4366
4367 extra-y := head_$(BITS).o
4368 extra-y += init_task.o
4369diff -urNp linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c
4370--- linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4371+++ linux-2.6.32.45/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4372@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4373 spin_unlock_irqrestore(&iommu->lock, flags);
4374 }
4375
4376-static struct dma_map_ops sun4v_dma_ops = {
4377+static const struct dma_map_ops sun4v_dma_ops = {
4378 .alloc_coherent = dma_4v_alloc_coherent,
4379 .free_coherent = dma_4v_free_coherent,
4380 .map_page = dma_4v_map_page,
4381diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_32.c linux-2.6.32.45/arch/sparc/kernel/process_32.c
4382--- linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4383+++ linux-2.6.32.45/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4384@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4385 rw->ins[4], rw->ins[5],
4386 rw->ins[6],
4387 rw->ins[7]);
4388- printk("%pS\n", (void *) rw->ins[7]);
4389+ printk("%pA\n", (void *) rw->ins[7]);
4390 rw = (struct reg_window32 *) rw->ins[6];
4391 }
4392 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4393@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4394
4395 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4396 r->psr, r->pc, r->npc, r->y, print_tainted());
4397- printk("PC: <%pS>\n", (void *) r->pc);
4398+ printk("PC: <%pA>\n", (void *) r->pc);
4399 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4400 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4401 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4402 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4403 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4404 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4405- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4406+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4407
4408 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4409 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4410@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4411 rw = (struct reg_window32 *) fp;
4412 pc = rw->ins[7];
4413 printk("[%08lx : ", pc);
4414- printk("%pS ] ", (void *) pc);
4415+ printk("%pA ] ", (void *) pc);
4416 fp = rw->ins[6];
4417 } while (++count < 16);
4418 printk("\n");
4419diff -urNp linux-2.6.32.45/arch/sparc/kernel/process_64.c linux-2.6.32.45/arch/sparc/kernel/process_64.c
4420--- linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4421+++ linux-2.6.32.45/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4422@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4423 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4424 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4425 if (regs->tstate & TSTATE_PRIV)
4426- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4427+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4428 }
4429
4430 void show_regs(struct pt_regs *regs)
4431 {
4432 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4433 regs->tpc, regs->tnpc, regs->y, print_tainted());
4434- printk("TPC: <%pS>\n", (void *) regs->tpc);
4435+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4436 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4437 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4438 regs->u_regs[3]);
4439@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4440 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4441 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4442 regs->u_regs[15]);
4443- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4444+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4445 show_regwindow(regs);
4446 }
4447
4448@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4449 ((tp && tp->task) ? tp->task->pid : -1));
4450
4451 if (gp->tstate & TSTATE_PRIV) {
4452- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4453+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4454 (void *) gp->tpc,
4455 (void *) gp->o7,
4456 (void *) gp->i7,
4457diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c
4458--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4459+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4460@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4461 if (ARCH_SUN4C && len > 0x20000000)
4462 return -ENOMEM;
4463 if (!addr)
4464- addr = TASK_UNMAPPED_BASE;
4465+ addr = current->mm->mmap_base;
4466
4467 if (flags & MAP_SHARED)
4468 addr = COLOUR_ALIGN(addr);
4469@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4470 }
4471 if (TASK_SIZE - PAGE_SIZE - len < addr)
4472 return -ENOMEM;
4473- if (!vmm || addr + len <= vmm->vm_start)
4474+ if (check_heap_stack_gap(vmm, addr, len))
4475 return addr;
4476 addr = vmm->vm_end;
4477 if (flags & MAP_SHARED)
4478diff -urNp linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c
4479--- linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4480+++ linux-2.6.32.45/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4481@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4482 /* We do not accept a shared mapping if it would violate
4483 * cache aliasing constraints.
4484 */
4485- if ((flags & MAP_SHARED) &&
4486+ if ((filp || (flags & MAP_SHARED)) &&
4487 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4488 return -EINVAL;
4489 return addr;
4490@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4491 if (filp || (flags & MAP_SHARED))
4492 do_color_align = 1;
4493
4494+#ifdef CONFIG_PAX_RANDMMAP
4495+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4496+#endif
4497+
4498 if (addr) {
4499 if (do_color_align)
4500 addr = COLOUR_ALIGN(addr, pgoff);
4501@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4502 addr = PAGE_ALIGN(addr);
4503
4504 vma = find_vma(mm, addr);
4505- if (task_size - len >= addr &&
4506- (!vma || addr + len <= vma->vm_start))
4507+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4508 return addr;
4509 }
4510
4511 if (len > mm->cached_hole_size) {
4512- start_addr = addr = mm->free_area_cache;
4513+ start_addr = addr = mm->free_area_cache;
4514 } else {
4515- start_addr = addr = TASK_UNMAPPED_BASE;
4516+ start_addr = addr = mm->mmap_base;
4517 mm->cached_hole_size = 0;
4518 }
4519
4520@@ -175,14 +178,14 @@ full_search:
4521 vma = find_vma(mm, VA_EXCLUDE_END);
4522 }
4523 if (unlikely(task_size < addr)) {
4524- if (start_addr != TASK_UNMAPPED_BASE) {
4525- start_addr = addr = TASK_UNMAPPED_BASE;
4526+ if (start_addr != mm->mmap_base) {
4527+ start_addr = addr = mm->mmap_base;
4528 mm->cached_hole_size = 0;
4529 goto full_search;
4530 }
4531 return -ENOMEM;
4532 }
4533- if (likely(!vma || addr + len <= vma->vm_start)) {
4534+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4535 /*
4536 * Remember the place where we stopped the search:
4537 */
4538@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4539 /* We do not accept a shared mapping if it would violate
4540 * cache aliasing constraints.
4541 */
4542- if ((flags & MAP_SHARED) &&
4543+ if ((filp || (flags & MAP_SHARED)) &&
4544 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4545 return -EINVAL;
4546 return addr;
4547@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4548 addr = PAGE_ALIGN(addr);
4549
4550 vma = find_vma(mm, addr);
4551- if (task_size - len >= addr &&
4552- (!vma || addr + len <= vma->vm_start))
4553+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4554 return addr;
4555 }
4556
4557@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4558 /* make sure it can fit in the remaining address space */
4559 if (likely(addr > len)) {
4560 vma = find_vma(mm, addr-len);
4561- if (!vma || addr <= vma->vm_start) {
4562+ if (check_heap_stack_gap(vma, addr - len, len)) {
4563 /* remember the address as a hint for next time */
4564 return (mm->free_area_cache = addr-len);
4565 }
4566@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4567 if (unlikely(mm->mmap_base < len))
4568 goto bottomup;
4569
4570- addr = mm->mmap_base-len;
4571- if (do_color_align)
4572- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4573+ addr = mm->mmap_base - len;
4574
4575 do {
4576+ if (do_color_align)
4577+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4578 /*
4579 * Lookup failure means no vma is above this address,
4580 * else if new region fits below vma->vm_start,
4581 * return with success:
4582 */
4583 vma = find_vma(mm, addr);
4584- if (likely(!vma || addr+len <= vma->vm_start)) {
4585+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4586 /* remember the address as a hint for next time */
4587 return (mm->free_area_cache = addr);
4588 }
4589@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4590 mm->cached_hole_size = vma->vm_start - addr;
4591
4592 /* try just below the current vma->vm_start */
4593- addr = vma->vm_start-len;
4594- if (do_color_align)
4595- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4596- } while (likely(len < vma->vm_start));
4597+ addr = skip_heap_stack_gap(vma, len);
4598+ } while (!IS_ERR_VALUE(addr));
4599
4600 bottomup:
4601 /*
4602@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4603 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4604 sysctl_legacy_va_layout) {
4605 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4606+
4607+#ifdef CONFIG_PAX_RANDMMAP
4608+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4609+ mm->mmap_base += mm->delta_mmap;
4610+#endif
4611+
4612 mm->get_unmapped_area = arch_get_unmapped_area;
4613 mm->unmap_area = arch_unmap_area;
4614 } else {
4615@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4616 gap = (task_size / 6 * 5);
4617
4618 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4619+
4620+#ifdef CONFIG_PAX_RANDMMAP
4621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4622+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4623+#endif
4624+
4625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4626 mm->unmap_area = arch_unmap_area_topdown;
4627 }
4628diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_32.c linux-2.6.32.45/arch/sparc/kernel/traps_32.c
4629--- linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4630+++ linux-2.6.32.45/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4631@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4632 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4633 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4634
4635+extern void gr_handle_kernel_exploit(void);
4636+
4637 void die_if_kernel(char *str, struct pt_regs *regs)
4638 {
4639 static int die_counter;
4640@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4641 count++ < 30 &&
4642 (((unsigned long) rw) >= PAGE_OFFSET) &&
4643 !(((unsigned long) rw) & 0x7)) {
4644- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4645+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4646 (void *) rw->ins[7]);
4647 rw = (struct reg_window32 *)rw->ins[6];
4648 }
4649 }
4650 printk("Instruction DUMP:");
4651 instruction_dump ((unsigned long *) regs->pc);
4652- if(regs->psr & PSR_PS)
4653+ if(regs->psr & PSR_PS) {
4654+ gr_handle_kernel_exploit();
4655 do_exit(SIGKILL);
4656+ }
4657 do_exit(SIGSEGV);
4658 }
4659
4660diff -urNp linux-2.6.32.45/arch/sparc/kernel/traps_64.c linux-2.6.32.45/arch/sparc/kernel/traps_64.c
4661--- linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4662+++ linux-2.6.32.45/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4663@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4664 i + 1,
4665 p->trapstack[i].tstate, p->trapstack[i].tpc,
4666 p->trapstack[i].tnpc, p->trapstack[i].tt);
4667- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4668+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4669 }
4670 }
4671
4672@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4673
4674 lvl -= 0x100;
4675 if (regs->tstate & TSTATE_PRIV) {
4676+
4677+#ifdef CONFIG_PAX_REFCOUNT
4678+ if (lvl == 6)
4679+ pax_report_refcount_overflow(regs);
4680+#endif
4681+
4682 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4683 die_if_kernel(buffer, regs);
4684 }
4685@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4686 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4687 {
4688 char buffer[32];
4689-
4690+
4691 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4692 0, lvl, SIGTRAP) == NOTIFY_STOP)
4693 return;
4694
4695+#ifdef CONFIG_PAX_REFCOUNT
4696+ if (lvl == 6)
4697+ pax_report_refcount_overflow(regs);
4698+#endif
4699+
4700 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4701
4702 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4703@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4704 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4705 printk("%s" "ERROR(%d): ",
4706 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4707- printk("TPC<%pS>\n", (void *) regs->tpc);
4708+ printk("TPC<%pA>\n", (void *) regs->tpc);
4709 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4710 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4711 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4712@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4713 smp_processor_id(),
4714 (type & 0x1) ? 'I' : 'D',
4715 regs->tpc);
4716- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4717+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4718 panic("Irrecoverable Cheetah+ parity error.");
4719 }
4720
4721@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4722 smp_processor_id(),
4723 (type & 0x1) ? 'I' : 'D',
4724 regs->tpc);
4725- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4726+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4727 }
4728
4729 struct sun4v_error_entry {
4730@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4731
4732 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4733 regs->tpc, tl);
4734- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4735+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4736 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4737- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4738+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4739 (void *) regs->u_regs[UREG_I7]);
4740 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4741 "pte[%lx] error[%lx]\n",
4742@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4743
4744 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4745 regs->tpc, tl);
4746- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4747+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4748 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4749- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4750+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4751 (void *) regs->u_regs[UREG_I7]);
4752 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4753 "pte[%lx] error[%lx]\n",
4754@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4755 fp = (unsigned long)sf->fp + STACK_BIAS;
4756 }
4757
4758- printk(" [%016lx] %pS\n", pc, (void *) pc);
4759+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4760 } while (++count < 16);
4761 }
4762
4763@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4764 return (struct reg_window *) (fp + STACK_BIAS);
4765 }
4766
4767+extern void gr_handle_kernel_exploit(void);
4768+
4769 void die_if_kernel(char *str, struct pt_regs *regs)
4770 {
4771 static int die_counter;
4772@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4773 while (rw &&
4774 count++ < 30&&
4775 is_kernel_stack(current, rw)) {
4776- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4777+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4778 (void *) rw->ins[7]);
4779
4780 rw = kernel_stack_up(rw);
4781@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4782 }
4783 user_instruction_dump ((unsigned int __user *) regs->tpc);
4784 }
4785- if (regs->tstate & TSTATE_PRIV)
4786+ if (regs->tstate & TSTATE_PRIV) {
4787+ gr_handle_kernel_exploit();
4788 do_exit(SIGKILL);
4789+ }
4790+
4791 do_exit(SIGSEGV);
4792 }
4793 EXPORT_SYMBOL(die_if_kernel);
4794diff -urNp linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S
4795--- linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4796+++ linux-2.6.32.45/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4797@@ -127,7 +127,7 @@ do_int_load:
4798 wr %o5, 0x0, %asi
4799 retl
4800 mov 0, %o0
4801- .size __do_int_load, .-__do_int_load
4802+ .size do_int_load, .-do_int_load
4803
4804 .section __ex_table,"a"
4805 .word 4b, __retl_efault
4806diff -urNp linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c
4807--- linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4808+++ linux-2.6.32.45/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4809@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4810 if (count < 5) {
4811 last_time = jiffies;
4812 count++;
4813- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4814+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4815 regs->tpc, (void *) regs->tpc);
4816 }
4817 }
4818diff -urNp linux-2.6.32.45/arch/sparc/lib/atomic_64.S linux-2.6.32.45/arch/sparc/lib/atomic_64.S
4819--- linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4820+++ linux-2.6.32.45/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4821@@ -18,7 +18,12 @@
4822 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4823 BACKOFF_SETUP(%o2)
4824 1: lduw [%o1], %g1
4825- add %g1, %o0, %g7
4826+ addcc %g1, %o0, %g7
4827+
4828+#ifdef CONFIG_PAX_REFCOUNT
4829+ tvs %icc, 6
4830+#endif
4831+
4832 cas [%o1], %g1, %g7
4833 cmp %g1, %g7
4834 bne,pn %icc, 2f
4835@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4836 2: BACKOFF_SPIN(%o2, %o3, 1b)
4837 .size atomic_add, .-atomic_add
4838
4839+ .globl atomic_add_unchecked
4840+ .type atomic_add_unchecked,#function
4841+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4842+ BACKOFF_SETUP(%o2)
4843+1: lduw [%o1], %g1
4844+ add %g1, %o0, %g7
4845+ cas [%o1], %g1, %g7
4846+ cmp %g1, %g7
4847+ bne,pn %icc, 2f
4848+ nop
4849+ retl
4850+ nop
4851+2: BACKOFF_SPIN(%o2, %o3, 1b)
4852+ .size atomic_add_unchecked, .-atomic_add_unchecked
4853+
4854 .globl atomic_sub
4855 .type atomic_sub,#function
4856 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4857 BACKOFF_SETUP(%o2)
4858 1: lduw [%o1], %g1
4859- sub %g1, %o0, %g7
4860+ subcc %g1, %o0, %g7
4861+
4862+#ifdef CONFIG_PAX_REFCOUNT
4863+ tvs %icc, 6
4864+#endif
4865+
4866 cas [%o1], %g1, %g7
4867 cmp %g1, %g7
4868 bne,pn %icc, 2f
4869@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4870 2: BACKOFF_SPIN(%o2, %o3, 1b)
4871 .size atomic_sub, .-atomic_sub
4872
4873+ .globl atomic_sub_unchecked
4874+ .type atomic_sub_unchecked,#function
4875+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4876+ BACKOFF_SETUP(%o2)
4877+1: lduw [%o1], %g1
4878+ sub %g1, %o0, %g7
4879+ cas [%o1], %g1, %g7
4880+ cmp %g1, %g7
4881+ bne,pn %icc, 2f
4882+ nop
4883+ retl
4884+ nop
4885+2: BACKOFF_SPIN(%o2, %o3, 1b)
4886+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4887+
4888 .globl atomic_add_ret
4889 .type atomic_add_ret,#function
4890 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4891 BACKOFF_SETUP(%o2)
4892 1: lduw [%o1], %g1
4893- add %g1, %o0, %g7
4894+ addcc %g1, %o0, %g7
4895+
4896+#ifdef CONFIG_PAX_REFCOUNT
4897+ tvs %icc, 6
4898+#endif
4899+
4900 cas [%o1], %g1, %g7
4901 cmp %g1, %g7
4902 bne,pn %icc, 2f
4903@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4904 2: BACKOFF_SPIN(%o2, %o3, 1b)
4905 .size atomic_add_ret, .-atomic_add_ret
4906
4907+ .globl atomic_add_ret_unchecked
4908+ .type atomic_add_ret_unchecked,#function
4909+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4910+ BACKOFF_SETUP(%o2)
4911+1: lduw [%o1], %g1
4912+ addcc %g1, %o0, %g7
4913+ cas [%o1], %g1, %g7
4914+ cmp %g1, %g7
4915+ bne,pn %icc, 2f
4916+ add %g7, %o0, %g7
4917+ sra %g7, 0, %o0
4918+ retl
4919+ nop
4920+2: BACKOFF_SPIN(%o2, %o3, 1b)
4921+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4922+
4923 .globl atomic_sub_ret
4924 .type atomic_sub_ret,#function
4925 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4926 BACKOFF_SETUP(%o2)
4927 1: lduw [%o1], %g1
4928- sub %g1, %o0, %g7
4929+ subcc %g1, %o0, %g7
4930+
4931+#ifdef CONFIG_PAX_REFCOUNT
4932+ tvs %icc, 6
4933+#endif
4934+
4935 cas [%o1], %g1, %g7
4936 cmp %g1, %g7
4937 bne,pn %icc, 2f
4938@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4939 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4940 BACKOFF_SETUP(%o2)
4941 1: ldx [%o1], %g1
4942- add %g1, %o0, %g7
4943+ addcc %g1, %o0, %g7
4944+
4945+#ifdef CONFIG_PAX_REFCOUNT
4946+ tvs %xcc, 6
4947+#endif
4948+
4949 casx [%o1], %g1, %g7
4950 cmp %g1, %g7
4951 bne,pn %xcc, 2f
4952@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4953 2: BACKOFF_SPIN(%o2, %o3, 1b)
4954 .size atomic64_add, .-atomic64_add
4955
4956+ .globl atomic64_add_unchecked
4957+ .type atomic64_add_unchecked,#function
4958+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4959+ BACKOFF_SETUP(%o2)
4960+1: ldx [%o1], %g1
4961+ addcc %g1, %o0, %g7
4962+ casx [%o1], %g1, %g7
4963+ cmp %g1, %g7
4964+ bne,pn %xcc, 2f
4965+ nop
4966+ retl
4967+ nop
4968+2: BACKOFF_SPIN(%o2, %o3, 1b)
4969+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4970+
4971 .globl atomic64_sub
4972 .type atomic64_sub,#function
4973 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4974 BACKOFF_SETUP(%o2)
4975 1: ldx [%o1], %g1
4976- sub %g1, %o0, %g7
4977+ subcc %g1, %o0, %g7
4978+
4979+#ifdef CONFIG_PAX_REFCOUNT
4980+ tvs %xcc, 6
4981+#endif
4982+
4983 casx [%o1], %g1, %g7
4984 cmp %g1, %g7
4985 bne,pn %xcc, 2f
4986@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4987 2: BACKOFF_SPIN(%o2, %o3, 1b)
4988 .size atomic64_sub, .-atomic64_sub
4989
4990+ .globl atomic64_sub_unchecked
4991+ .type atomic64_sub_unchecked,#function
4992+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4993+ BACKOFF_SETUP(%o2)
4994+1: ldx [%o1], %g1
4995+ subcc %g1, %o0, %g7
4996+ casx [%o1], %g1, %g7
4997+ cmp %g1, %g7
4998+ bne,pn %xcc, 2f
4999+ nop
5000+ retl
5001+ nop
5002+2: BACKOFF_SPIN(%o2, %o3, 1b)
5003+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5004+
5005 .globl atomic64_add_ret
5006 .type atomic64_add_ret,#function
5007 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5008 BACKOFF_SETUP(%o2)
5009 1: ldx [%o1], %g1
5010- add %g1, %o0, %g7
5011+ addcc %g1, %o0, %g7
5012+
5013+#ifdef CONFIG_PAX_REFCOUNT
5014+ tvs %xcc, 6
5015+#endif
5016+
5017 casx [%o1], %g1, %g7
5018 cmp %g1, %g7
5019 bne,pn %xcc, 2f
5020@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
5021 2: BACKOFF_SPIN(%o2, %o3, 1b)
5022 .size atomic64_add_ret, .-atomic64_add_ret
5023
5024+ .globl atomic64_add_ret_unchecked
5025+ .type atomic64_add_ret_unchecked,#function
5026+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5027+ BACKOFF_SETUP(%o2)
5028+1: ldx [%o1], %g1
5029+ addcc %g1, %o0, %g7
5030+ casx [%o1], %g1, %g7
5031+ cmp %g1, %g7
5032+ bne,pn %xcc, 2f
5033+ add %g7, %o0, %g7
5034+ mov %g7, %o0
5035+ retl
5036+ nop
5037+2: BACKOFF_SPIN(%o2, %o3, 1b)
5038+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5039+
5040 .globl atomic64_sub_ret
5041 .type atomic64_sub_ret,#function
5042 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5043 BACKOFF_SETUP(%o2)
5044 1: ldx [%o1], %g1
5045- sub %g1, %o0, %g7
5046+ subcc %g1, %o0, %g7
5047+
5048+#ifdef CONFIG_PAX_REFCOUNT
5049+ tvs %xcc, 6
5050+#endif
5051+
5052 casx [%o1], %g1, %g7
5053 cmp %g1, %g7
5054 bne,pn %xcc, 2f
5055diff -urNp linux-2.6.32.45/arch/sparc/lib/ksyms.c linux-2.6.32.45/arch/sparc/lib/ksyms.c
5056--- linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
5057+++ linux-2.6.32.45/arch/sparc/lib/ksyms.c 2011-08-19 23:05:14.000000000 -0400
5058@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5059
5060 /* Atomic counter implementation. */
5061 EXPORT_SYMBOL(atomic_add);
5062+EXPORT_SYMBOL(atomic_add_unchecked);
5063 EXPORT_SYMBOL(atomic_add_ret);
5064+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5065 EXPORT_SYMBOL(atomic_sub);
5066+EXPORT_SYMBOL(atomic_sub_unchecked);
5067 EXPORT_SYMBOL(atomic_sub_ret);
5068 EXPORT_SYMBOL(atomic64_add);
5069+EXPORT_SYMBOL(atomic64_add_unchecked);
5070 EXPORT_SYMBOL(atomic64_add_ret);
5071+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5072 EXPORT_SYMBOL(atomic64_sub);
5073+EXPORT_SYMBOL(atomic64_sub_unchecked);
5074 EXPORT_SYMBOL(atomic64_sub_ret);
5075
5076 /* Atomic bit operations. */
5077diff -urNp linux-2.6.32.45/arch/sparc/lib/Makefile linux-2.6.32.45/arch/sparc/lib/Makefile
5078--- linux-2.6.32.45/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
5079+++ linux-2.6.32.45/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
5080@@ -2,7 +2,7 @@
5081 #
5082
5083 asflags-y := -ansi -DST_DIV0=0x02
5084-ccflags-y := -Werror
5085+#ccflags-y := -Werror
5086
5087 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5088 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5089diff -urNp linux-2.6.32.45/arch/sparc/lib/rwsem_64.S linux-2.6.32.45/arch/sparc/lib/rwsem_64.S
5090--- linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
5091+++ linux-2.6.32.45/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
5092@@ -11,7 +11,12 @@
5093 .globl __down_read
5094 __down_read:
5095 1: lduw [%o0], %g1
5096- add %g1, 1, %g7
5097+ addcc %g1, 1, %g7
5098+
5099+#ifdef CONFIG_PAX_REFCOUNT
5100+ tvs %icc, 6
5101+#endif
5102+
5103 cas [%o0], %g1, %g7
5104 cmp %g1, %g7
5105 bne,pn %icc, 1b
5106@@ -33,7 +38,12 @@ __down_read:
5107 .globl __down_read_trylock
5108 __down_read_trylock:
5109 1: lduw [%o0], %g1
5110- add %g1, 1, %g7
5111+ addcc %g1, 1, %g7
5112+
5113+#ifdef CONFIG_PAX_REFCOUNT
5114+ tvs %icc, 6
5115+#endif
5116+
5117 cmp %g7, 0
5118 bl,pn %icc, 2f
5119 mov 0, %o1
5120@@ -51,7 +61,12 @@ __down_write:
5121 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5122 1:
5123 lduw [%o0], %g3
5124- add %g3, %g1, %g7
5125+ addcc %g3, %g1, %g7
5126+
5127+#ifdef CONFIG_PAX_REFCOUNT
5128+ tvs %icc, 6
5129+#endif
5130+
5131 cas [%o0], %g3, %g7
5132 cmp %g3, %g7
5133 bne,pn %icc, 1b
5134@@ -77,7 +92,12 @@ __down_write_trylock:
5135 cmp %g3, 0
5136 bne,pn %icc, 2f
5137 mov 0, %o1
5138- add %g3, %g1, %g7
5139+ addcc %g3, %g1, %g7
5140+
5141+#ifdef CONFIG_PAX_REFCOUNT
5142+ tvs %icc, 6
5143+#endif
5144+
5145 cas [%o0], %g3, %g7
5146 cmp %g3, %g7
5147 bne,pn %icc, 1b
5148@@ -90,7 +110,12 @@ __down_write_trylock:
5149 __up_read:
5150 1:
5151 lduw [%o0], %g1
5152- sub %g1, 1, %g7
5153+ subcc %g1, 1, %g7
5154+
5155+#ifdef CONFIG_PAX_REFCOUNT
5156+ tvs %icc, 6
5157+#endif
5158+
5159 cas [%o0], %g1, %g7
5160 cmp %g1, %g7
5161 bne,pn %icc, 1b
5162@@ -118,7 +143,12 @@ __up_write:
5163 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5164 1:
5165 lduw [%o0], %g3
5166- sub %g3, %g1, %g7
5167+ subcc %g3, %g1, %g7
5168+
5169+#ifdef CONFIG_PAX_REFCOUNT
5170+ tvs %icc, 6
5171+#endif
5172+
5173 cas [%o0], %g3, %g7
5174 cmp %g3, %g7
5175 bne,pn %icc, 1b
5176@@ -143,7 +173,12 @@ __downgrade_write:
5177 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5178 1:
5179 lduw [%o0], %g3
5180- sub %g3, %g1, %g7
5181+ subcc %g3, %g1, %g7
5182+
5183+#ifdef CONFIG_PAX_REFCOUNT
5184+ tvs %icc, 6
5185+#endif
5186+
5187 cas [%o0], %g3, %g7
5188 cmp %g3, %g7
5189 bne,pn %icc, 1b
5190diff -urNp linux-2.6.32.45/arch/sparc/Makefile linux-2.6.32.45/arch/sparc/Makefile
5191--- linux-2.6.32.45/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5192+++ linux-2.6.32.45/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5193@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5194 # Export what is needed by arch/sparc/boot/Makefile
5195 export VMLINUX_INIT VMLINUX_MAIN
5196 VMLINUX_INIT := $(head-y) $(init-y)
5197-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5198+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5199 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5200 VMLINUX_MAIN += $(drivers-y) $(net-y)
5201
5202diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_32.c linux-2.6.32.45/arch/sparc/mm/fault_32.c
5203--- linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5204+++ linux-2.6.32.45/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5205@@ -21,6 +21,9 @@
5206 #include <linux/interrupt.h>
5207 #include <linux/module.h>
5208 #include <linux/kdebug.h>
5209+#include <linux/slab.h>
5210+#include <linux/pagemap.h>
5211+#include <linux/compiler.h>
5212
5213 #include <asm/system.h>
5214 #include <asm/page.h>
5215@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5216 return safe_compute_effective_address(regs, insn);
5217 }
5218
5219+#ifdef CONFIG_PAX_PAGEEXEC
5220+#ifdef CONFIG_PAX_DLRESOLVE
5221+static void pax_emuplt_close(struct vm_area_struct *vma)
5222+{
5223+ vma->vm_mm->call_dl_resolve = 0UL;
5224+}
5225+
5226+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5227+{
5228+ unsigned int *kaddr;
5229+
5230+ vmf->page = alloc_page(GFP_HIGHUSER);
5231+ if (!vmf->page)
5232+ return VM_FAULT_OOM;
5233+
5234+ kaddr = kmap(vmf->page);
5235+ memset(kaddr, 0, PAGE_SIZE);
5236+ kaddr[0] = 0x9DE3BFA8U; /* save */
5237+ flush_dcache_page(vmf->page);
5238+ kunmap(vmf->page);
5239+ return VM_FAULT_MAJOR;
5240+}
5241+
5242+static const struct vm_operations_struct pax_vm_ops = {
5243+ .close = pax_emuplt_close,
5244+ .fault = pax_emuplt_fault
5245+};
5246+
5247+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5248+{
5249+ int ret;
5250+
5251+ vma->vm_mm = current->mm;
5252+ vma->vm_start = addr;
5253+ vma->vm_end = addr + PAGE_SIZE;
5254+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5255+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5256+ vma->vm_ops = &pax_vm_ops;
5257+
5258+ ret = insert_vm_struct(current->mm, vma);
5259+ if (ret)
5260+ return ret;
5261+
5262+ ++current->mm->total_vm;
5263+ return 0;
5264+}
5265+#endif
5266+
5267+/*
5268+ * PaX: decide what to do with offenders (regs->pc = fault address)
5269+ *
5270+ * returns 1 when task should be killed
5271+ * 2 when patched PLT trampoline was detected
5272+ * 3 when unpatched PLT trampoline was detected
5273+ */
5274+static int pax_handle_fetch_fault(struct pt_regs *regs)
5275+{
5276+
5277+#ifdef CONFIG_PAX_EMUPLT
5278+ int err;
5279+
5280+ do { /* PaX: patched PLT emulation #1 */
5281+ unsigned int sethi1, sethi2, jmpl;
5282+
5283+ err = get_user(sethi1, (unsigned int *)regs->pc);
5284+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5285+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5286+
5287+ if (err)
5288+ break;
5289+
5290+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5291+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5292+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5293+ {
5294+ unsigned int addr;
5295+
5296+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5297+ addr = regs->u_regs[UREG_G1];
5298+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5299+ regs->pc = addr;
5300+ regs->npc = addr+4;
5301+ return 2;
5302+ }
5303+ } while (0);
5304+
5305+ { /* PaX: patched PLT emulation #2 */
5306+ unsigned int ba;
5307+
5308+ err = get_user(ba, (unsigned int *)regs->pc);
5309+
5310+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5311+ unsigned int addr;
5312+
5313+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5314+ regs->pc = addr;
5315+ regs->npc = addr+4;
5316+ return 2;
5317+ }
5318+ }
5319+
5320+ do { /* PaX: patched PLT emulation #3 */
5321+ unsigned int sethi, jmpl, nop;
5322+
5323+ err = get_user(sethi, (unsigned int *)regs->pc);
5324+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5325+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5326+
5327+ if (err)
5328+ break;
5329+
5330+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5331+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5332+ nop == 0x01000000U)
5333+ {
5334+ unsigned int addr;
5335+
5336+ addr = (sethi & 0x003FFFFFU) << 10;
5337+ regs->u_regs[UREG_G1] = addr;
5338+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5339+ regs->pc = addr;
5340+ regs->npc = addr+4;
5341+ return 2;
5342+ }
5343+ } while (0);
5344+
5345+ do { /* PaX: unpatched PLT emulation step 1 */
5346+ unsigned int sethi, ba, nop;
5347+
5348+ err = get_user(sethi, (unsigned int *)regs->pc);
5349+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5350+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5351+
5352+ if (err)
5353+ break;
5354+
5355+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5356+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5357+ nop == 0x01000000U)
5358+ {
5359+ unsigned int addr, save, call;
5360+
5361+ if ((ba & 0xFFC00000U) == 0x30800000U)
5362+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5363+ else
5364+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5365+
5366+ err = get_user(save, (unsigned int *)addr);
5367+ err |= get_user(call, (unsigned int *)(addr+4));
5368+ err |= get_user(nop, (unsigned int *)(addr+8));
5369+ if (err)
5370+ break;
5371+
5372+#ifdef CONFIG_PAX_DLRESOLVE
5373+ if (save == 0x9DE3BFA8U &&
5374+ (call & 0xC0000000U) == 0x40000000U &&
5375+ nop == 0x01000000U)
5376+ {
5377+ struct vm_area_struct *vma;
5378+ unsigned long call_dl_resolve;
5379+
5380+ down_read(&current->mm->mmap_sem);
5381+ call_dl_resolve = current->mm->call_dl_resolve;
5382+ up_read(&current->mm->mmap_sem);
5383+ if (likely(call_dl_resolve))
5384+ goto emulate;
5385+
5386+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5387+
5388+ down_write(&current->mm->mmap_sem);
5389+ if (current->mm->call_dl_resolve) {
5390+ call_dl_resolve = current->mm->call_dl_resolve;
5391+ up_write(&current->mm->mmap_sem);
5392+ if (vma)
5393+ kmem_cache_free(vm_area_cachep, vma);
5394+ goto emulate;
5395+ }
5396+
5397+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5398+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5399+ up_write(&current->mm->mmap_sem);
5400+ if (vma)
5401+ kmem_cache_free(vm_area_cachep, vma);
5402+ return 1;
5403+ }
5404+
5405+ if (pax_insert_vma(vma, call_dl_resolve)) {
5406+ up_write(&current->mm->mmap_sem);
5407+ kmem_cache_free(vm_area_cachep, vma);
5408+ return 1;
5409+ }
5410+
5411+ current->mm->call_dl_resolve = call_dl_resolve;
5412+ up_write(&current->mm->mmap_sem);
5413+
5414+emulate:
5415+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5416+ regs->pc = call_dl_resolve;
5417+ regs->npc = addr+4;
5418+ return 3;
5419+ }
5420+#endif
5421+
5422+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5423+ if ((save & 0xFFC00000U) == 0x05000000U &&
5424+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5425+ nop == 0x01000000U)
5426+ {
5427+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5428+ regs->u_regs[UREG_G2] = addr + 4;
5429+ addr = (save & 0x003FFFFFU) << 10;
5430+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5431+ regs->pc = addr;
5432+ regs->npc = addr+4;
5433+ return 3;
5434+ }
5435+ }
5436+ } while (0);
5437+
5438+ do { /* PaX: unpatched PLT emulation step 2 */
5439+ unsigned int save, call, nop;
5440+
5441+ err = get_user(save, (unsigned int *)(regs->pc-4));
5442+ err |= get_user(call, (unsigned int *)regs->pc);
5443+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5444+ if (err)
5445+ break;
5446+
5447+ if (save == 0x9DE3BFA8U &&
5448+ (call & 0xC0000000U) == 0x40000000U &&
5449+ nop == 0x01000000U)
5450+ {
5451+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5452+
5453+ regs->u_regs[UREG_RETPC] = regs->pc;
5454+ regs->pc = dl_resolve;
5455+ regs->npc = dl_resolve+4;
5456+ return 3;
5457+ }
5458+ } while (0);
5459+#endif
5460+
5461+ return 1;
5462+}
5463+
5464+void pax_report_insns(void *pc, void *sp)
5465+{
5466+ unsigned long i;
5467+
5468+ printk(KERN_ERR "PAX: bytes at PC: ");
5469+ for (i = 0; i < 8; i++) {
5470+ unsigned int c;
5471+ if (get_user(c, (unsigned int *)pc+i))
5472+ printk(KERN_CONT "???????? ");
5473+ else
5474+ printk(KERN_CONT "%08x ", c);
5475+ }
5476+ printk("\n");
5477+}
5478+#endif
5479+
5480 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5481 unsigned long address)
5482 {
5483@@ -231,6 +495,24 @@ good_area:
5484 if(!(vma->vm_flags & VM_WRITE))
5485 goto bad_area;
5486 } else {
5487+
5488+#ifdef CONFIG_PAX_PAGEEXEC
5489+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5490+ up_read(&mm->mmap_sem);
5491+ switch (pax_handle_fetch_fault(regs)) {
5492+
5493+#ifdef CONFIG_PAX_EMUPLT
5494+ case 2:
5495+ case 3:
5496+ return;
5497+#endif
5498+
5499+ }
5500+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5501+ do_group_exit(SIGKILL);
5502+ }
5503+#endif
5504+
5505 /* Allow reads even for write-only mappings */
5506 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5507 goto bad_area;
5508diff -urNp linux-2.6.32.45/arch/sparc/mm/fault_64.c linux-2.6.32.45/arch/sparc/mm/fault_64.c
5509--- linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5510+++ linux-2.6.32.45/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5511@@ -20,6 +20,9 @@
5512 #include <linux/kprobes.h>
5513 #include <linux/kdebug.h>
5514 #include <linux/percpu.h>
5515+#include <linux/slab.h>
5516+#include <linux/pagemap.h>
5517+#include <linux/compiler.h>
5518
5519 #include <asm/page.h>
5520 #include <asm/pgtable.h>
5521@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5522 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5523 regs->tpc);
5524 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5525- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5526+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5527 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5528 dump_stack();
5529 unhandled_fault(regs->tpc, current, regs);
5530@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5531 show_regs(regs);
5532 }
5533
5534+#ifdef CONFIG_PAX_PAGEEXEC
5535+#ifdef CONFIG_PAX_DLRESOLVE
5536+static void pax_emuplt_close(struct vm_area_struct *vma)
5537+{
5538+ vma->vm_mm->call_dl_resolve = 0UL;
5539+}
5540+
5541+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5542+{
5543+ unsigned int *kaddr;
5544+
5545+ vmf->page = alloc_page(GFP_HIGHUSER);
5546+ if (!vmf->page)
5547+ return VM_FAULT_OOM;
5548+
5549+ kaddr = kmap(vmf->page);
5550+ memset(kaddr, 0, PAGE_SIZE);
5551+ kaddr[0] = 0x9DE3BFA8U; /* save */
5552+ flush_dcache_page(vmf->page);
5553+ kunmap(vmf->page);
5554+ return VM_FAULT_MAJOR;
5555+}
5556+
5557+static const struct vm_operations_struct pax_vm_ops = {
5558+ .close = pax_emuplt_close,
5559+ .fault = pax_emuplt_fault
5560+};
5561+
5562+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5563+{
5564+ int ret;
5565+
5566+ vma->vm_mm = current->mm;
5567+ vma->vm_start = addr;
5568+ vma->vm_end = addr + PAGE_SIZE;
5569+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5570+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5571+ vma->vm_ops = &pax_vm_ops;
5572+
5573+ ret = insert_vm_struct(current->mm, vma);
5574+ if (ret)
5575+ return ret;
5576+
5577+ ++current->mm->total_vm;
5578+ return 0;
5579+}
5580+#endif
5581+
5582+/*
5583+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5584+ *
5585+ * returns 1 when task should be killed
5586+ * 2 when patched PLT trampoline was detected
5587+ * 3 when unpatched PLT trampoline was detected
5588+ */
5589+static int pax_handle_fetch_fault(struct pt_regs *regs)
5590+{
5591+
5592+#ifdef CONFIG_PAX_EMUPLT
5593+ int err;
5594+
5595+ do { /* PaX: patched PLT emulation #1 */
5596+ unsigned int sethi1, sethi2, jmpl;
5597+
5598+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5599+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5600+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5601+
5602+ if (err)
5603+ break;
5604+
5605+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5606+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5607+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5608+ {
5609+ unsigned long addr;
5610+
5611+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5612+ addr = regs->u_regs[UREG_G1];
5613+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5614+
5615+ if (test_thread_flag(TIF_32BIT))
5616+ addr &= 0xFFFFFFFFUL;
5617+
5618+ regs->tpc = addr;
5619+ regs->tnpc = addr+4;
5620+ return 2;
5621+ }
5622+ } while (0);
5623+
5624+ { /* PaX: patched PLT emulation #2 */
5625+ unsigned int ba;
5626+
5627+ err = get_user(ba, (unsigned int *)regs->tpc);
5628+
5629+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5630+ unsigned long addr;
5631+
5632+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5633+
5634+ if (test_thread_flag(TIF_32BIT))
5635+ addr &= 0xFFFFFFFFUL;
5636+
5637+ regs->tpc = addr;
5638+ regs->tnpc = addr+4;
5639+ return 2;
5640+ }
5641+ }
5642+
5643+ do { /* PaX: patched PLT emulation #3 */
5644+ unsigned int sethi, jmpl, nop;
5645+
5646+ err = get_user(sethi, (unsigned int *)regs->tpc);
5647+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5648+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5649+
5650+ if (err)
5651+ break;
5652+
5653+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5654+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5655+ nop == 0x01000000U)
5656+ {
5657+ unsigned long addr;
5658+
5659+ addr = (sethi & 0x003FFFFFU) << 10;
5660+ regs->u_regs[UREG_G1] = addr;
5661+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5662+
5663+ if (test_thread_flag(TIF_32BIT))
5664+ addr &= 0xFFFFFFFFUL;
5665+
5666+ regs->tpc = addr;
5667+ regs->tnpc = addr+4;
5668+ return 2;
5669+ }
5670+ } while (0);
5671+
5672+ do { /* PaX: patched PLT emulation #4 */
5673+ unsigned int sethi, mov1, call, mov2;
5674+
5675+ err = get_user(sethi, (unsigned int *)regs->tpc);
5676+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5677+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5678+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5679+
5680+ if (err)
5681+ break;
5682+
5683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5684+ mov1 == 0x8210000FU &&
5685+ (call & 0xC0000000U) == 0x40000000U &&
5686+ mov2 == 0x9E100001U)
5687+ {
5688+ unsigned long addr;
5689+
5690+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5691+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5692+
5693+ if (test_thread_flag(TIF_32BIT))
5694+ addr &= 0xFFFFFFFFUL;
5695+
5696+ regs->tpc = addr;
5697+ regs->tnpc = addr+4;
5698+ return 2;
5699+ }
5700+ } while (0);
5701+
5702+ do { /* PaX: patched PLT emulation #5 */
5703+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5704+
5705+ err = get_user(sethi, (unsigned int *)regs->tpc);
5706+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5707+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5708+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5709+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5710+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5711+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5712+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5713+
5714+ if (err)
5715+ break;
5716+
5717+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5718+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5719+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5720+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5721+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5722+ sllx == 0x83287020U &&
5723+ jmpl == 0x81C04005U &&
5724+ nop == 0x01000000U)
5725+ {
5726+ unsigned long addr;
5727+
5728+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5729+ regs->u_regs[UREG_G1] <<= 32;
5730+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5731+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5732+ regs->tpc = addr;
5733+ regs->tnpc = addr+4;
5734+ return 2;
5735+ }
5736+ } while (0);
5737+
5738+ do { /* PaX: patched PLT emulation #6 */
5739+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5740+
5741+ err = get_user(sethi, (unsigned int *)regs->tpc);
5742+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5743+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5744+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5745+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5746+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5747+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5748+
5749+ if (err)
5750+ break;
5751+
5752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5753+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5754+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5755+ sllx == 0x83287020U &&
5756+ (or & 0xFFFFE000U) == 0x8A116000U &&
5757+ jmpl == 0x81C04005U &&
5758+ nop == 0x01000000U)
5759+ {
5760+ unsigned long addr;
5761+
5762+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5763+ regs->u_regs[UREG_G1] <<= 32;
5764+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5765+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5766+ regs->tpc = addr;
5767+ regs->tnpc = addr+4;
5768+ return 2;
5769+ }
5770+ } while (0);
5771+
5772+ do { /* PaX: unpatched PLT emulation step 1 */
5773+ unsigned int sethi, ba, nop;
5774+
5775+ err = get_user(sethi, (unsigned int *)regs->tpc);
5776+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5777+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5778+
5779+ if (err)
5780+ break;
5781+
5782+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5783+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5784+ nop == 0x01000000U)
5785+ {
5786+ unsigned long addr;
5787+ unsigned int save, call;
5788+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5789+
5790+ if ((ba & 0xFFC00000U) == 0x30800000U)
5791+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5792+ else
5793+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5794+
5795+ if (test_thread_flag(TIF_32BIT))
5796+ addr &= 0xFFFFFFFFUL;
5797+
5798+ err = get_user(save, (unsigned int *)addr);
5799+ err |= get_user(call, (unsigned int *)(addr+4));
5800+ err |= get_user(nop, (unsigned int *)(addr+8));
5801+ if (err)
5802+ break;
5803+
5804+#ifdef CONFIG_PAX_DLRESOLVE
5805+ if (save == 0x9DE3BFA8U &&
5806+ (call & 0xC0000000U) == 0x40000000U &&
5807+ nop == 0x01000000U)
5808+ {
5809+ struct vm_area_struct *vma;
5810+ unsigned long call_dl_resolve;
5811+
5812+ down_read(&current->mm->mmap_sem);
5813+ call_dl_resolve = current->mm->call_dl_resolve;
5814+ up_read(&current->mm->mmap_sem);
5815+ if (likely(call_dl_resolve))
5816+ goto emulate;
5817+
5818+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5819+
5820+ down_write(&current->mm->mmap_sem);
5821+ if (current->mm->call_dl_resolve) {
5822+ call_dl_resolve = current->mm->call_dl_resolve;
5823+ up_write(&current->mm->mmap_sem);
5824+ if (vma)
5825+ kmem_cache_free(vm_area_cachep, vma);
5826+ goto emulate;
5827+ }
5828+
5829+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5830+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5831+ up_write(&current->mm->mmap_sem);
5832+ if (vma)
5833+ kmem_cache_free(vm_area_cachep, vma);
5834+ return 1;
5835+ }
5836+
5837+ if (pax_insert_vma(vma, call_dl_resolve)) {
5838+ up_write(&current->mm->mmap_sem);
5839+ kmem_cache_free(vm_area_cachep, vma);
5840+ return 1;
5841+ }
5842+
5843+ current->mm->call_dl_resolve = call_dl_resolve;
5844+ up_write(&current->mm->mmap_sem);
5845+
5846+emulate:
5847+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5848+ regs->tpc = call_dl_resolve;
5849+ regs->tnpc = addr+4;
5850+ return 3;
5851+ }
5852+#endif
5853+
5854+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5855+ if ((save & 0xFFC00000U) == 0x05000000U &&
5856+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5857+ nop == 0x01000000U)
5858+ {
5859+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5860+ regs->u_regs[UREG_G2] = addr + 4;
5861+ addr = (save & 0x003FFFFFU) << 10;
5862+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5863+
5864+ if (test_thread_flag(TIF_32BIT))
5865+ addr &= 0xFFFFFFFFUL;
5866+
5867+ regs->tpc = addr;
5868+ regs->tnpc = addr+4;
5869+ return 3;
5870+ }
5871+
5872+ /* PaX: 64-bit PLT stub */
5873+ err = get_user(sethi1, (unsigned int *)addr);
5874+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5875+ err |= get_user(or1, (unsigned int *)(addr+8));
5876+ err |= get_user(or2, (unsigned int *)(addr+12));
5877+ err |= get_user(sllx, (unsigned int *)(addr+16));
5878+ err |= get_user(add, (unsigned int *)(addr+20));
5879+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5880+ err |= get_user(nop, (unsigned int *)(addr+28));
5881+ if (err)
5882+ break;
5883+
5884+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5885+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5886+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5887+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5888+ sllx == 0x89293020U &&
5889+ add == 0x8A010005U &&
5890+ jmpl == 0x89C14000U &&
5891+ nop == 0x01000000U)
5892+ {
5893+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5894+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5895+ regs->u_regs[UREG_G4] <<= 32;
5896+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5897+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5898+ regs->u_regs[UREG_G4] = addr + 24;
5899+ addr = regs->u_regs[UREG_G5];
5900+ regs->tpc = addr;
5901+ regs->tnpc = addr+4;
5902+ return 3;
5903+ }
5904+ }
5905+ } while (0);
5906+
5907+#ifdef CONFIG_PAX_DLRESOLVE
5908+ do { /* PaX: unpatched PLT emulation step 2 */
5909+ unsigned int save, call, nop;
5910+
5911+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5912+ err |= get_user(call, (unsigned int *)regs->tpc);
5913+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5914+ if (err)
5915+ break;
5916+
5917+ if (save == 0x9DE3BFA8U &&
5918+ (call & 0xC0000000U) == 0x40000000U &&
5919+ nop == 0x01000000U)
5920+ {
5921+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5922+
5923+ if (test_thread_flag(TIF_32BIT))
5924+ dl_resolve &= 0xFFFFFFFFUL;
5925+
5926+ regs->u_regs[UREG_RETPC] = regs->tpc;
5927+ regs->tpc = dl_resolve;
5928+ regs->tnpc = dl_resolve+4;
5929+ return 3;
5930+ }
5931+ } while (0);
5932+#endif
5933+
5934+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5935+ unsigned int sethi, ba, nop;
5936+
5937+ err = get_user(sethi, (unsigned int *)regs->tpc);
5938+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5939+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5940+
5941+ if (err)
5942+ break;
5943+
5944+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5945+ (ba & 0xFFF00000U) == 0x30600000U &&
5946+ nop == 0x01000000U)
5947+ {
5948+ unsigned long addr;
5949+
5950+ addr = (sethi & 0x003FFFFFU) << 10;
5951+ regs->u_regs[UREG_G1] = addr;
5952+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5953+
5954+ if (test_thread_flag(TIF_32BIT))
5955+ addr &= 0xFFFFFFFFUL;
5956+
5957+ regs->tpc = addr;
5958+ regs->tnpc = addr+4;
5959+ return 2;
5960+ }
5961+ } while (0);
5962+
5963+#endif
5964+
5965+ return 1;
5966+}
5967+
5968+void pax_report_insns(void *pc, void *sp)
5969+{
5970+ unsigned long i;
5971+
5972+ printk(KERN_ERR "PAX: bytes at PC: ");
5973+ for (i = 0; i < 8; i++) {
5974+ unsigned int c;
5975+ if (get_user(c, (unsigned int *)pc+i))
5976+ printk(KERN_CONT "???????? ");
5977+ else
5978+ printk(KERN_CONT "%08x ", c);
5979+ }
5980+ printk("\n");
5981+}
5982+#endif
5983+
5984 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5985 {
5986 struct mm_struct *mm = current->mm;
5987@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5988 if (!vma)
5989 goto bad_area;
5990
5991+#ifdef CONFIG_PAX_PAGEEXEC
5992+ /* PaX: detect ITLB misses on non-exec pages */
5993+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5994+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5995+ {
5996+ if (address != regs->tpc)
5997+ goto good_area;
5998+
5999+ up_read(&mm->mmap_sem);
6000+ switch (pax_handle_fetch_fault(regs)) {
6001+
6002+#ifdef CONFIG_PAX_EMUPLT
6003+ case 2:
6004+ case 3:
6005+ return;
6006+#endif
6007+
6008+ }
6009+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6010+ do_group_exit(SIGKILL);
6011+ }
6012+#endif
6013+
6014 /* Pure DTLB misses do not tell us whether the fault causing
6015 * load/store/atomic was a write or not, it only says that there
6016 * was no match. So in such a case we (carefully) read the
6017diff -urNp linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c
6018--- linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
6019+++ linux-2.6.32.45/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
6020@@ -69,7 +69,7 @@ full_search:
6021 }
6022 return -ENOMEM;
6023 }
6024- if (likely(!vma || addr + len <= vma->vm_start)) {
6025+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6026 /*
6027 * Remember the place where we stopped the search:
6028 */
6029@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
6030 /* make sure it can fit in the remaining address space */
6031 if (likely(addr > len)) {
6032 vma = find_vma(mm, addr-len);
6033- if (!vma || addr <= vma->vm_start) {
6034+ if (check_heap_stack_gap(vma, addr - len, len)) {
6035 /* remember the address as a hint for next time */
6036 return (mm->free_area_cache = addr-len);
6037 }
6038@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
6039 if (unlikely(mm->mmap_base < len))
6040 goto bottomup;
6041
6042- addr = (mm->mmap_base-len) & HPAGE_MASK;
6043+ addr = mm->mmap_base - len;
6044
6045 do {
6046+ addr &= HPAGE_MASK;
6047 /*
6048 * Lookup failure means no vma is above this address,
6049 * else if new region fits below vma->vm_start,
6050 * return with success:
6051 */
6052 vma = find_vma(mm, addr);
6053- if (likely(!vma || addr+len <= vma->vm_start)) {
6054+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6055 /* remember the address as a hint for next time */
6056 return (mm->free_area_cache = addr);
6057 }
6058@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
6059 mm->cached_hole_size = vma->vm_start - addr;
6060
6061 /* try just below the current vma->vm_start */
6062- addr = (vma->vm_start-len) & HPAGE_MASK;
6063- } while (likely(len < vma->vm_start));
6064+ addr = skip_heap_stack_gap(vma, len);
6065+ } while (!IS_ERR_VALUE(addr));
6066
6067 bottomup:
6068 /*
6069@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
6070 if (addr) {
6071 addr = ALIGN(addr, HPAGE_SIZE);
6072 vma = find_vma(mm, addr);
6073- if (task_size - len >= addr &&
6074- (!vma || addr + len <= vma->vm_start))
6075+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6076 return addr;
6077 }
6078 if (mm->get_unmapped_area == arch_get_unmapped_area)
6079diff -urNp linux-2.6.32.45/arch/sparc/mm/init_32.c linux-2.6.32.45/arch/sparc/mm/init_32.c
6080--- linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
6081+++ linux-2.6.32.45/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
6082@@ -317,6 +317,9 @@ extern void device_scan(void);
6083 pgprot_t PAGE_SHARED __read_mostly;
6084 EXPORT_SYMBOL(PAGE_SHARED);
6085
6086+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6087+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6088+
6089 void __init paging_init(void)
6090 {
6091 switch(sparc_cpu_model) {
6092@@ -345,17 +348,17 @@ void __init paging_init(void)
6093
6094 /* Initialize the protection map with non-constant, MMU dependent values. */
6095 protection_map[0] = PAGE_NONE;
6096- protection_map[1] = PAGE_READONLY;
6097- protection_map[2] = PAGE_COPY;
6098- protection_map[3] = PAGE_COPY;
6099+ protection_map[1] = PAGE_READONLY_NOEXEC;
6100+ protection_map[2] = PAGE_COPY_NOEXEC;
6101+ protection_map[3] = PAGE_COPY_NOEXEC;
6102 protection_map[4] = PAGE_READONLY;
6103 protection_map[5] = PAGE_READONLY;
6104 protection_map[6] = PAGE_COPY;
6105 protection_map[7] = PAGE_COPY;
6106 protection_map[8] = PAGE_NONE;
6107- protection_map[9] = PAGE_READONLY;
6108- protection_map[10] = PAGE_SHARED;
6109- protection_map[11] = PAGE_SHARED;
6110+ protection_map[9] = PAGE_READONLY_NOEXEC;
6111+ protection_map[10] = PAGE_SHARED_NOEXEC;
6112+ protection_map[11] = PAGE_SHARED_NOEXEC;
6113 protection_map[12] = PAGE_READONLY;
6114 protection_map[13] = PAGE_READONLY;
6115 protection_map[14] = PAGE_SHARED;
6116diff -urNp linux-2.6.32.45/arch/sparc/mm/Makefile linux-2.6.32.45/arch/sparc/mm/Makefile
6117--- linux-2.6.32.45/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6118+++ linux-2.6.32.45/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6119@@ -2,7 +2,7 @@
6120 #
6121
6122 asflags-y := -ansi
6123-ccflags-y := -Werror
6124+#ccflags-y := -Werror
6125
6126 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6127 obj-y += fault_$(BITS).o
6128diff -urNp linux-2.6.32.45/arch/sparc/mm/srmmu.c linux-2.6.32.45/arch/sparc/mm/srmmu.c
6129--- linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6130+++ linux-2.6.32.45/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6131@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6132 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6133 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6134 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6135+
6136+#ifdef CONFIG_PAX_PAGEEXEC
6137+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6138+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6139+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6140+#endif
6141+
6142 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6143 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6144
6145diff -urNp linux-2.6.32.45/arch/um/include/asm/kmap_types.h linux-2.6.32.45/arch/um/include/asm/kmap_types.h
6146--- linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6147+++ linux-2.6.32.45/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6148@@ -23,6 +23,7 @@ enum km_type {
6149 KM_IRQ1,
6150 KM_SOFTIRQ0,
6151 KM_SOFTIRQ1,
6152+ KM_CLEARPAGE,
6153 KM_TYPE_NR
6154 };
6155
6156diff -urNp linux-2.6.32.45/arch/um/include/asm/page.h linux-2.6.32.45/arch/um/include/asm/page.h
6157--- linux-2.6.32.45/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6158+++ linux-2.6.32.45/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6159@@ -14,6 +14,9 @@
6160 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6161 #define PAGE_MASK (~(PAGE_SIZE-1))
6162
6163+#define ktla_ktva(addr) (addr)
6164+#define ktva_ktla(addr) (addr)
6165+
6166 #ifndef __ASSEMBLY__
6167
6168 struct page;
6169diff -urNp linux-2.6.32.45/arch/um/kernel/process.c linux-2.6.32.45/arch/um/kernel/process.c
6170--- linux-2.6.32.45/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6171+++ linux-2.6.32.45/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6172@@ -393,22 +393,6 @@ int singlestepping(void * t)
6173 return 2;
6174 }
6175
6176-/*
6177- * Only x86 and x86_64 have an arch_align_stack().
6178- * All other arches have "#define arch_align_stack(x) (x)"
6179- * in their asm/system.h
6180- * As this is included in UML from asm-um/system-generic.h,
6181- * we can use it to behave as the subarch does.
6182- */
6183-#ifndef arch_align_stack
6184-unsigned long arch_align_stack(unsigned long sp)
6185-{
6186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6187- sp -= get_random_int() % 8192;
6188- return sp & ~0xf;
6189-}
6190-#endif
6191-
6192 unsigned long get_wchan(struct task_struct *p)
6193 {
6194 unsigned long stack_page, sp, ip;
6195diff -urNp linux-2.6.32.45/arch/um/sys-i386/syscalls.c linux-2.6.32.45/arch/um/sys-i386/syscalls.c
6196--- linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6197+++ linux-2.6.32.45/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6198@@ -11,6 +11,21 @@
6199 #include "asm/uaccess.h"
6200 #include "asm/unistd.h"
6201
6202+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6203+{
6204+ unsigned long pax_task_size = TASK_SIZE;
6205+
6206+#ifdef CONFIG_PAX_SEGMEXEC
6207+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6208+ pax_task_size = SEGMEXEC_TASK_SIZE;
6209+#endif
6210+
6211+ if (len > pax_task_size || addr > pax_task_size - len)
6212+ return -EINVAL;
6213+
6214+ return 0;
6215+}
6216+
6217 /*
6218 * Perform the select(nd, in, out, ex, tv) and mmap() system
6219 * calls. Linux/i386 didn't use to be able to handle more than
6220diff -urNp linux-2.6.32.45/arch/x86/boot/bitops.h linux-2.6.32.45/arch/x86/boot/bitops.h
6221--- linux-2.6.32.45/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6222+++ linux-2.6.32.45/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6223@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6224 u8 v;
6225 const u32 *p = (const u32 *)addr;
6226
6227- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6228+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6229 return v;
6230 }
6231
6232@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6233
6234 static inline void set_bit(int nr, void *addr)
6235 {
6236- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6237+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6238 }
6239
6240 #endif /* BOOT_BITOPS_H */
6241diff -urNp linux-2.6.32.45/arch/x86/boot/boot.h linux-2.6.32.45/arch/x86/boot/boot.h
6242--- linux-2.6.32.45/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6243+++ linux-2.6.32.45/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6244@@ -82,7 +82,7 @@ static inline void io_delay(void)
6245 static inline u16 ds(void)
6246 {
6247 u16 seg;
6248- asm("movw %%ds,%0" : "=rm" (seg));
6249+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6250 return seg;
6251 }
6252
6253@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6254 static inline int memcmp(const void *s1, const void *s2, size_t len)
6255 {
6256 u8 diff;
6257- asm("repe; cmpsb; setnz %0"
6258+ asm volatile("repe; cmpsb; setnz %0"
6259 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6260 return diff;
6261 }
6262diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_32.S linux-2.6.32.45/arch/x86/boot/compressed/head_32.S
6263--- linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6264+++ linux-2.6.32.45/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6265@@ -76,7 +76,7 @@ ENTRY(startup_32)
6266 notl %eax
6267 andl %eax, %ebx
6268 #else
6269- movl $LOAD_PHYSICAL_ADDR, %ebx
6270+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6271 #endif
6272
6273 /* Target address to relocate to for decompression */
6274@@ -149,7 +149,7 @@ relocated:
6275 * and where it was actually loaded.
6276 */
6277 movl %ebp, %ebx
6278- subl $LOAD_PHYSICAL_ADDR, %ebx
6279+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6280 jz 2f /* Nothing to be done if loaded at compiled addr. */
6281 /*
6282 * Process relocations.
6283@@ -157,8 +157,7 @@ relocated:
6284
6285 1: subl $4, %edi
6286 movl (%edi), %ecx
6287- testl %ecx, %ecx
6288- jz 2f
6289+ jecxz 2f
6290 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6291 jmp 1b
6292 2:
6293diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/head_64.S linux-2.6.32.45/arch/x86/boot/compressed/head_64.S
6294--- linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6295+++ linux-2.6.32.45/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6296@@ -91,7 +91,7 @@ ENTRY(startup_32)
6297 notl %eax
6298 andl %eax, %ebx
6299 #else
6300- movl $LOAD_PHYSICAL_ADDR, %ebx
6301+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6302 #endif
6303
6304 /* Target address to relocate to for decompression */
6305@@ -183,7 +183,7 @@ no_longmode:
6306 hlt
6307 jmp 1b
6308
6309-#include "../../kernel/verify_cpu_64.S"
6310+#include "../../kernel/verify_cpu.S"
6311
6312 /*
6313 * Be careful here startup_64 needs to be at a predictable
6314@@ -234,7 +234,7 @@ ENTRY(startup_64)
6315 notq %rax
6316 andq %rax, %rbp
6317 #else
6318- movq $LOAD_PHYSICAL_ADDR, %rbp
6319+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6320 #endif
6321
6322 /* Target address to relocate to for decompression */
6323diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/Makefile linux-2.6.32.45/arch/x86/boot/compressed/Makefile
6324--- linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6325+++ linux-2.6.32.45/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6326@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6327 KBUILD_CFLAGS += $(cflags-y)
6328 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6329 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6330+ifdef CONSTIFY_PLUGIN
6331+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6332+endif
6333
6334 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6335 GCOV_PROFILE := n
6336diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/misc.c linux-2.6.32.45/arch/x86/boot/compressed/misc.c
6337--- linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6338+++ linux-2.6.32.45/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6339@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6340 case PT_LOAD:
6341 #ifdef CONFIG_RELOCATABLE
6342 dest = output;
6343- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6344+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6345 #else
6346 dest = (void *)(phdr->p_paddr);
6347 #endif
6348@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6349 error("Destination address too large");
6350 #endif
6351 #ifndef CONFIG_RELOCATABLE
6352- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6353+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6354 error("Wrong destination address");
6355 #endif
6356
6357diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c
6358--- linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6359+++ linux-2.6.32.45/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6360@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6361
6362 offs = (olen > ilen) ? olen - ilen : 0;
6363 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6364- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6365+ offs += 64*1024; /* Add 64K bytes slack */
6366 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6367
6368 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6369diff -urNp linux-2.6.32.45/arch/x86/boot/compressed/relocs.c linux-2.6.32.45/arch/x86/boot/compressed/relocs.c
6370--- linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6371+++ linux-2.6.32.45/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6372@@ -10,8 +10,11 @@
6373 #define USE_BSD
6374 #include <endian.h>
6375
6376+#include "../../../../include/linux/autoconf.h"
6377+
6378 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6379 static Elf32_Ehdr ehdr;
6380+static Elf32_Phdr *phdr;
6381 static unsigned long reloc_count, reloc_idx;
6382 static unsigned long *relocs;
6383
6384@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6385
6386 static int is_safe_abs_reloc(const char* sym_name)
6387 {
6388- int i;
6389+ unsigned int i;
6390
6391 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6392 if (!strcmp(sym_name, safe_abs_relocs[i]))
6393@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6394 }
6395 }
6396
6397+static void read_phdrs(FILE *fp)
6398+{
6399+ unsigned int i;
6400+
6401+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6402+ if (!phdr) {
6403+ die("Unable to allocate %d program headers\n",
6404+ ehdr.e_phnum);
6405+ }
6406+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6407+ die("Seek to %d failed: %s\n",
6408+ ehdr.e_phoff, strerror(errno));
6409+ }
6410+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6411+ die("Cannot read ELF program headers: %s\n",
6412+ strerror(errno));
6413+ }
6414+ for(i = 0; i < ehdr.e_phnum; i++) {
6415+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6416+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6417+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6418+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6419+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6420+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6421+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6422+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6423+ }
6424+
6425+}
6426+
6427 static void read_shdrs(FILE *fp)
6428 {
6429- int i;
6430+ unsigned int i;
6431 Elf32_Shdr shdr;
6432
6433 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6434@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6435
6436 static void read_strtabs(FILE *fp)
6437 {
6438- int i;
6439+ unsigned int i;
6440 for (i = 0; i < ehdr.e_shnum; i++) {
6441 struct section *sec = &secs[i];
6442 if (sec->shdr.sh_type != SHT_STRTAB) {
6443@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6444
6445 static void read_symtabs(FILE *fp)
6446 {
6447- int i,j;
6448+ unsigned int i,j;
6449 for (i = 0; i < ehdr.e_shnum; i++) {
6450 struct section *sec = &secs[i];
6451 if (sec->shdr.sh_type != SHT_SYMTAB) {
6452@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6453
6454 static void read_relocs(FILE *fp)
6455 {
6456- int i,j;
6457+ unsigned int i,j;
6458+ uint32_t base;
6459+
6460 for (i = 0; i < ehdr.e_shnum; i++) {
6461 struct section *sec = &secs[i];
6462 if (sec->shdr.sh_type != SHT_REL) {
6463@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6464 die("Cannot read symbol table: %s\n",
6465 strerror(errno));
6466 }
6467+ base = 0;
6468+ for (j = 0; j < ehdr.e_phnum; j++) {
6469+ if (phdr[j].p_type != PT_LOAD )
6470+ continue;
6471+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6472+ continue;
6473+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6474+ break;
6475+ }
6476 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6477 Elf32_Rel *rel = &sec->reltab[j];
6478- rel->r_offset = elf32_to_cpu(rel->r_offset);
6479+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6480 rel->r_info = elf32_to_cpu(rel->r_info);
6481 }
6482 }
6483@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6484
6485 static void print_absolute_symbols(void)
6486 {
6487- int i;
6488+ unsigned int i;
6489 printf("Absolute symbols\n");
6490 printf(" Num: Value Size Type Bind Visibility Name\n");
6491 for (i = 0; i < ehdr.e_shnum; i++) {
6492 struct section *sec = &secs[i];
6493 char *sym_strtab;
6494 Elf32_Sym *sh_symtab;
6495- int j;
6496+ unsigned int j;
6497
6498 if (sec->shdr.sh_type != SHT_SYMTAB) {
6499 continue;
6500@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6501
6502 static void print_absolute_relocs(void)
6503 {
6504- int i, printed = 0;
6505+ unsigned int i, printed = 0;
6506
6507 for (i = 0; i < ehdr.e_shnum; i++) {
6508 struct section *sec = &secs[i];
6509 struct section *sec_applies, *sec_symtab;
6510 char *sym_strtab;
6511 Elf32_Sym *sh_symtab;
6512- int j;
6513+ unsigned int j;
6514 if (sec->shdr.sh_type != SHT_REL) {
6515 continue;
6516 }
6517@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6518
6519 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6520 {
6521- int i;
6522+ unsigned int i;
6523 /* Walk through the relocations */
6524 for (i = 0; i < ehdr.e_shnum; i++) {
6525 char *sym_strtab;
6526 Elf32_Sym *sh_symtab;
6527 struct section *sec_applies, *sec_symtab;
6528- int j;
6529+ unsigned int j;
6530 struct section *sec = &secs[i];
6531
6532 if (sec->shdr.sh_type != SHT_REL) {
6533@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6534 if (sym->st_shndx == SHN_ABS) {
6535 continue;
6536 }
6537+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6538+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6539+ continue;
6540+
6541+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6542+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6543+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6544+ continue;
6545+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6546+ continue;
6547+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6548+ continue;
6549+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6550+ continue;
6551+#endif
6552 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6553 /*
6554 * NONE can be ignored and and PC relative
6555@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6556
6557 static void emit_relocs(int as_text)
6558 {
6559- int i;
6560+ unsigned int i;
6561 /* Count how many relocations I have and allocate space for them. */
6562 reloc_count = 0;
6563 walk_relocs(count_reloc);
6564@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6565 fname, strerror(errno));
6566 }
6567 read_ehdr(fp);
6568+ read_phdrs(fp);
6569 read_shdrs(fp);
6570 read_strtabs(fp);
6571 read_symtabs(fp);
6572diff -urNp linux-2.6.32.45/arch/x86/boot/cpucheck.c linux-2.6.32.45/arch/x86/boot/cpucheck.c
6573--- linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6574+++ linux-2.6.32.45/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6575@@ -74,7 +74,7 @@ static int has_fpu(void)
6576 u16 fcw = -1, fsw = -1;
6577 u32 cr0;
6578
6579- asm("movl %%cr0,%0" : "=r" (cr0));
6580+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6581 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6582 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6583 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6584@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6585 {
6586 u32 f0, f1;
6587
6588- asm("pushfl ; "
6589+ asm volatile("pushfl ; "
6590 "pushfl ; "
6591 "popl %0 ; "
6592 "movl %0,%1 ; "
6593@@ -115,7 +115,7 @@ static void get_flags(void)
6594 set_bit(X86_FEATURE_FPU, cpu.flags);
6595
6596 if (has_eflag(X86_EFLAGS_ID)) {
6597- asm("cpuid"
6598+ asm volatile("cpuid"
6599 : "=a" (max_intel_level),
6600 "=b" (cpu_vendor[0]),
6601 "=d" (cpu_vendor[1]),
6602@@ -124,7 +124,7 @@ static void get_flags(void)
6603
6604 if (max_intel_level >= 0x00000001 &&
6605 max_intel_level <= 0x0000ffff) {
6606- asm("cpuid"
6607+ asm volatile("cpuid"
6608 : "=a" (tfms),
6609 "=c" (cpu.flags[4]),
6610 "=d" (cpu.flags[0])
6611@@ -136,7 +136,7 @@ static void get_flags(void)
6612 cpu.model += ((tfms >> 16) & 0xf) << 4;
6613 }
6614
6615- asm("cpuid"
6616+ asm volatile("cpuid"
6617 : "=a" (max_amd_level)
6618 : "a" (0x80000000)
6619 : "ebx", "ecx", "edx");
6620@@ -144,7 +144,7 @@ static void get_flags(void)
6621 if (max_amd_level >= 0x80000001 &&
6622 max_amd_level <= 0x8000ffff) {
6623 u32 eax = 0x80000001;
6624- asm("cpuid"
6625+ asm volatile("cpuid"
6626 : "+a" (eax),
6627 "=c" (cpu.flags[6]),
6628 "=d" (cpu.flags[1])
6629@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6630 u32 ecx = MSR_K7_HWCR;
6631 u32 eax, edx;
6632
6633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6634+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6635 eax &= ~(1 << 15);
6636- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6637+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6638
6639 get_flags(); /* Make sure it really did something */
6640 err = check_flags();
6641@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6642 u32 ecx = MSR_VIA_FCR;
6643 u32 eax, edx;
6644
6645- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6646+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6647 eax |= (1<<1)|(1<<7);
6648- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6649+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6650
6651 set_bit(X86_FEATURE_CX8, cpu.flags);
6652 err = check_flags();
6653@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6654 u32 eax, edx;
6655 u32 level = 1;
6656
6657- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6658- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6659- asm("cpuid"
6660+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6661+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6662+ asm volatile("cpuid"
6663 : "+a" (level), "=d" (cpu.flags[0])
6664 : : "ecx", "ebx");
6665- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6666+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6667
6668 err = check_flags();
6669 }
6670diff -urNp linux-2.6.32.45/arch/x86/boot/header.S linux-2.6.32.45/arch/x86/boot/header.S
6671--- linux-2.6.32.45/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6672+++ linux-2.6.32.45/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6673@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6674 # single linked list of
6675 # struct setup_data
6676
6677-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6678+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6679
6680 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6681 #define VO_INIT_SIZE (VO__end - VO__text)
6682diff -urNp linux-2.6.32.45/arch/x86/boot/Makefile linux-2.6.32.45/arch/x86/boot/Makefile
6683--- linux-2.6.32.45/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6684+++ linux-2.6.32.45/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6685@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6686 $(call cc-option, -fno-stack-protector) \
6687 $(call cc-option, -mpreferred-stack-boundary=2)
6688 KBUILD_CFLAGS += $(call cc-option, -m32)
6689+ifdef CONSTIFY_PLUGIN
6690+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6691+endif
6692 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6693 GCOV_PROFILE := n
6694
6695diff -urNp linux-2.6.32.45/arch/x86/boot/memory.c linux-2.6.32.45/arch/x86/boot/memory.c
6696--- linux-2.6.32.45/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6697+++ linux-2.6.32.45/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6698@@ -19,7 +19,7 @@
6699
6700 static int detect_memory_e820(void)
6701 {
6702- int count = 0;
6703+ unsigned int count = 0;
6704 struct biosregs ireg, oreg;
6705 struct e820entry *desc = boot_params.e820_map;
6706 static struct e820entry buf; /* static so it is zeroed */
6707diff -urNp linux-2.6.32.45/arch/x86/boot/video.c linux-2.6.32.45/arch/x86/boot/video.c
6708--- linux-2.6.32.45/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6709+++ linux-2.6.32.45/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6710@@ -90,7 +90,7 @@ static void store_mode_params(void)
6711 static unsigned int get_entry(void)
6712 {
6713 char entry_buf[4];
6714- int i, len = 0;
6715+ unsigned int i, len = 0;
6716 int key;
6717 unsigned int v;
6718
6719diff -urNp linux-2.6.32.45/arch/x86/boot/video-vesa.c linux-2.6.32.45/arch/x86/boot/video-vesa.c
6720--- linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6721+++ linux-2.6.32.45/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6722@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6723
6724 boot_params.screen_info.vesapm_seg = oreg.es;
6725 boot_params.screen_info.vesapm_off = oreg.di;
6726+ boot_params.screen_info.vesapm_size = oreg.cx;
6727 }
6728
6729 /*
6730diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_aout.c linux-2.6.32.45/arch/x86/ia32/ia32_aout.c
6731--- linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6732+++ linux-2.6.32.45/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6733@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6734 unsigned long dump_start, dump_size;
6735 struct user32 dump;
6736
6737+ memset(&dump, 0, sizeof(dump));
6738+
6739 fs = get_fs();
6740 set_fs(KERNEL_DS);
6741 has_dumped = 1;
6742@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6743 dump_size = dump.u_ssize << PAGE_SHIFT;
6744 DUMP_WRITE(dump_start, dump_size);
6745 }
6746- /*
6747- * Finally dump the task struct. Not be used by gdb, but
6748- * could be useful
6749- */
6750- set_fs(KERNEL_DS);
6751- DUMP_WRITE(current, sizeof(*current));
6752 end_coredump:
6753 set_fs(fs);
6754 return has_dumped;
6755diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32entry.S linux-2.6.32.45/arch/x86/ia32/ia32entry.S
6756--- linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6757+++ linux-2.6.32.45/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6758@@ -13,6 +13,7 @@
6759 #include <asm/thread_info.h>
6760 #include <asm/segment.h>
6761 #include <asm/irqflags.h>
6762+#include <asm/pgtable.h>
6763 #include <linux/linkage.h>
6764
6765 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6766@@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6767 ENDPROC(native_irq_enable_sysexit)
6768 #endif
6769
6770+ .macro pax_enter_kernel_user
6771+#ifdef CONFIG_PAX_MEMORY_UDEREF
6772+ call pax_enter_kernel_user
6773+#endif
6774+ .endm
6775+
6776+ .macro pax_exit_kernel_user
6777+#ifdef CONFIG_PAX_MEMORY_UDEREF
6778+ call pax_exit_kernel_user
6779+#endif
6780+#ifdef CONFIG_PAX_RANDKSTACK
6781+ pushq %rax
6782+ call pax_randomize_kstack
6783+ popq %rax
6784+#endif
6785+ pax_erase_kstack
6786+ .endm
6787+
6788+.macro pax_erase_kstack
6789+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6790+ call pax_erase_kstack
6791+#endif
6792+.endm
6793+
6794 /*
6795 * 32bit SYSENTER instruction entry.
6796 *
6797@@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6798 CFI_REGISTER rsp,rbp
6799 SWAPGS_UNSAFE_STACK
6800 movq PER_CPU_VAR(kernel_stack), %rsp
6801- addq $(KERNEL_STACK_OFFSET),%rsp
6802+ pax_enter_kernel_user
6803 /*
6804 * No need to follow this irqs on/off section: the syscall
6805 * disabled irqs, here we enable it straight after entry:
6806@@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6807 pushfq
6808 CFI_ADJUST_CFA_OFFSET 8
6809 /*CFI_REL_OFFSET rflags,0*/
6810- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6811+ GET_THREAD_INFO(%r10)
6812+ movl TI_sysenter_return(%r10), %r10d
6813 CFI_REGISTER rip,r10
6814 pushq $__USER32_CS
6815 CFI_ADJUST_CFA_OFFSET 8
6816@@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6817 SAVE_ARGS 0,0,1
6818 /* no need to do an access_ok check here because rbp has been
6819 32bit zero extended */
6820+
6821+#ifdef CONFIG_PAX_MEMORY_UDEREF
6822+ mov $PAX_USER_SHADOW_BASE,%r10
6823+ add %r10,%rbp
6824+#endif
6825+
6826 1: movl (%rbp),%ebp
6827 .section __ex_table,"a"
6828 .quad 1b,ia32_badarg
6829@@ -172,6 +204,7 @@ sysenter_dispatch:
6830 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6831 jnz sysexit_audit
6832 sysexit_from_sys_call:
6833+ pax_exit_kernel_user
6834 andl $~TS_COMPAT,TI_status(%r10)
6835 /* clear IF, that popfq doesn't enable interrupts early */
6836 andl $~0x200,EFLAGS-R11(%rsp)
6837@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6838 movl %eax,%esi /* 2nd arg: syscall number */
6839 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6840 call audit_syscall_entry
6841+
6842+ pax_erase_kstack
6843+
6844 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6845 cmpq $(IA32_NR_syscalls-1),%rax
6846 ja ia32_badsys
6847@@ -252,6 +288,9 @@ sysenter_tracesys:
6848 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6849 movq %rsp,%rdi /* &pt_regs -> arg1 */
6850 call syscall_trace_enter
6851+
6852+ pax_erase_kstack
6853+
6854 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6855 RESTORE_REST
6856 cmpq $(IA32_NR_syscalls-1),%rax
6857@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6858 ENTRY(ia32_cstar_target)
6859 CFI_STARTPROC32 simple
6860 CFI_SIGNAL_FRAME
6861- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6862+ CFI_DEF_CFA rsp,0
6863 CFI_REGISTER rip,rcx
6864 /*CFI_REGISTER rflags,r11*/
6865 SWAPGS_UNSAFE_STACK
6866 movl %esp,%r8d
6867 CFI_REGISTER rsp,r8
6868 movq PER_CPU_VAR(kernel_stack),%rsp
6869+
6870+#ifdef CONFIG_PAX_MEMORY_UDEREF
6871+ pax_enter_kernel_user
6872+#endif
6873+
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,1,1
6880+ SAVE_ARGS 8*6,1,1
6881 movl %eax,%eax /* zero extension */
6882 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6883 movq %rcx,RIP-ARGOFFSET(%rsp)
6884@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6885 /* no need to do an access_ok check here because r8 has been
6886 32bit zero extended */
6887 /* hardware stack frame is complete now */
6888+
6889+#ifdef CONFIG_PAX_MEMORY_UDEREF
6890+ mov $PAX_USER_SHADOW_BASE,%r10
6891+ add %r10,%r8
6892+#endif
6893+
6894 1: movl (%r8),%r9d
6895 .section __ex_table,"a"
6896 .quad 1b,ia32_badarg
6897@@ -333,6 +383,7 @@ cstar_dispatch:
6898 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6899 jnz sysretl_audit
6900 sysretl_from_sys_call:
6901+ pax_exit_kernel_user
6902 andl $~TS_COMPAT,TI_status(%r10)
6903 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6904 movl RIP-ARGOFFSET(%rsp),%ecx
6905@@ -370,6 +421,9 @@ cstar_tracesys:
6906 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6907 movq %rsp,%rdi /* &pt_regs -> arg1 */
6908 call syscall_trace_enter
6909+
6910+ pax_erase_kstack
6911+
6912 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6913 RESTORE_REST
6914 xchgl %ebp,%r9d
6915@@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6916 CFI_REL_OFFSET rip,RIP-RIP
6917 PARAVIRT_ADJUST_EXCEPTION_FRAME
6918 SWAPGS
6919+ pax_enter_kernel_user
6920 /*
6921 * No need to follow this irqs on/off section: the syscall
6922 * disabled irqs and here we enable it straight after entry:
6923@@ -448,6 +503,9 @@ ia32_tracesys:
6924 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6925 movq %rsp,%rdi /* &pt_regs -> arg1 */
6926 call syscall_trace_enter
6927+
6928+ pax_erase_kstack
6929+
6930 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6931 RESTORE_REST
6932 cmpq $(IA32_NR_syscalls-1),%rax
6933diff -urNp linux-2.6.32.45/arch/x86/ia32/ia32_signal.c linux-2.6.32.45/arch/x86/ia32/ia32_signal.c
6934--- linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6935+++ linux-2.6.32.45/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6936@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6937 sp -= frame_size;
6938 /* Align the stack pointer according to the i386 ABI,
6939 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6940- sp = ((sp + 4) & -16ul) - 4;
6941+ sp = ((sp - 12) & -16ul) - 4;
6942 return (void __user *) sp;
6943 }
6944
6945@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6946 * These are actually not used anymore, but left because some
6947 * gdb versions depend on them as a marker.
6948 */
6949- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6950+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6951 } put_user_catch(err);
6952
6953 if (err)
6954@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6955 0xb8,
6956 __NR_ia32_rt_sigreturn,
6957 0x80cd,
6958- 0,
6959+ 0
6960 };
6961
6962 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6963@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6964
6965 if (ka->sa.sa_flags & SA_RESTORER)
6966 restorer = ka->sa.sa_restorer;
6967+ else if (current->mm->context.vdso)
6968+ /* Return stub is in 32bit vsyscall page */
6969+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6970 else
6971- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6972- rt_sigreturn);
6973+ restorer = &frame->retcode;
6974 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6975
6976 /*
6977 * Not actually used anymore, but left because some gdb
6978 * versions need it.
6979 */
6980- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6981+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6982 } put_user_catch(err);
6983
6984 if (err)
6985diff -urNp linux-2.6.32.45/arch/x86/include/asm/alternative.h linux-2.6.32.45/arch/x86/include/asm/alternative.h
6986--- linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6987+++ linux-2.6.32.45/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6988@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6989 " .byte 662b-661b\n" /* sourcelen */ \
6990 " .byte 664f-663f\n" /* replacementlen */ \
6991 ".previous\n" \
6992- ".section .altinstr_replacement, \"ax\"\n" \
6993+ ".section .altinstr_replacement, \"a\"\n" \
6994 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6995 ".previous"
6996
6997diff -urNp linux-2.6.32.45/arch/x86/include/asm/apic.h linux-2.6.32.45/arch/x86/include/asm/apic.h
6998--- linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-03-27 14:31:47.000000000 -0400
6999+++ linux-2.6.32.45/arch/x86/include/asm/apic.h 2011-08-17 20:01:15.000000000 -0400
7000@@ -46,7 +46,7 @@ static inline void generic_apic_probe(vo
7001
7002 #ifdef CONFIG_X86_LOCAL_APIC
7003
7004-extern unsigned int apic_verbosity;
7005+extern int apic_verbosity;
7006 extern int local_apic_timer_c2_ok;
7007
7008 extern int disable_apic;
7009diff -urNp linux-2.6.32.45/arch/x86/include/asm/apm.h linux-2.6.32.45/arch/x86/include/asm/apm.h
7010--- linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
7011+++ linux-2.6.32.45/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
7012@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
7013 __asm__ __volatile__(APM_DO_ZERO_SEGS
7014 "pushl %%edi\n\t"
7015 "pushl %%ebp\n\t"
7016- "lcall *%%cs:apm_bios_entry\n\t"
7017+ "lcall *%%ss:apm_bios_entry\n\t"
7018 "setc %%al\n\t"
7019 "popl %%ebp\n\t"
7020 "popl %%edi\n\t"
7021@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
7022 __asm__ __volatile__(APM_DO_ZERO_SEGS
7023 "pushl %%edi\n\t"
7024 "pushl %%ebp\n\t"
7025- "lcall *%%cs:apm_bios_entry\n\t"
7026+ "lcall *%%ss:apm_bios_entry\n\t"
7027 "setc %%bl\n\t"
7028 "popl %%ebp\n\t"
7029 "popl %%edi\n\t"
7030diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_32.h linux-2.6.32.45/arch/x86/include/asm/atomic_32.h
7031--- linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
7032+++ linux-2.6.32.45/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
7033@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
7034 }
7035
7036 /**
7037+ * atomic_read_unchecked - read atomic variable
7038+ * @v: pointer of type atomic_unchecked_t
7039+ *
7040+ * Atomically reads the value of @v.
7041+ */
7042+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7043+{
7044+ return v->counter;
7045+}
7046+
7047+/**
7048 * atomic_set - set atomic variable
7049 * @v: pointer of type atomic_t
7050 * @i: required value
7051@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
7052 }
7053
7054 /**
7055+ * atomic_set_unchecked - set atomic variable
7056+ * @v: pointer of type atomic_unchecked_t
7057+ * @i: required value
7058+ *
7059+ * Atomically sets the value of @v to @i.
7060+ */
7061+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7062+{
7063+ v->counter = i;
7064+}
7065+
7066+/**
7067 * atomic_add - add integer to atomic variable
7068 * @i: integer value to add
7069 * @v: pointer of type atomic_t
7070@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
7071 */
7072 static inline void atomic_add(int i, atomic_t *v)
7073 {
7074- asm volatile(LOCK_PREFIX "addl %1,%0"
7075+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7076+
7077+#ifdef CONFIG_PAX_REFCOUNT
7078+ "jno 0f\n"
7079+ LOCK_PREFIX "subl %1,%0\n"
7080+ "int $4\n0:\n"
7081+ _ASM_EXTABLE(0b, 0b)
7082+#endif
7083+
7084+ : "+m" (v->counter)
7085+ : "ir" (i));
7086+}
7087+
7088+/**
7089+ * atomic_add_unchecked - add integer to atomic variable
7090+ * @i: integer value to add
7091+ * @v: pointer of type atomic_unchecked_t
7092+ *
7093+ * Atomically adds @i to @v.
7094+ */
7095+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7096+{
7097+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7098 : "+m" (v->counter)
7099 : "ir" (i));
7100 }
7101@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
7102 */
7103 static inline void atomic_sub(int i, atomic_t *v)
7104 {
7105- asm volatile(LOCK_PREFIX "subl %1,%0"
7106+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7107+
7108+#ifdef CONFIG_PAX_REFCOUNT
7109+ "jno 0f\n"
7110+ LOCK_PREFIX "addl %1,%0\n"
7111+ "int $4\n0:\n"
7112+ _ASM_EXTABLE(0b, 0b)
7113+#endif
7114+
7115+ : "+m" (v->counter)
7116+ : "ir" (i));
7117+}
7118+
7119+/**
7120+ * atomic_sub_unchecked - subtract integer from atomic variable
7121+ * @i: integer value to subtract
7122+ * @v: pointer of type atomic_unchecked_t
7123+ *
7124+ * Atomically subtracts @i from @v.
7125+ */
7126+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7127+{
7128+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7129 : "+m" (v->counter)
7130 : "ir" (i));
7131 }
7132@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7133 {
7134 unsigned char c;
7135
7136- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7137+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7138+
7139+#ifdef CONFIG_PAX_REFCOUNT
7140+ "jno 0f\n"
7141+ LOCK_PREFIX "addl %2,%0\n"
7142+ "int $4\n0:\n"
7143+ _ASM_EXTABLE(0b, 0b)
7144+#endif
7145+
7146+ "sete %1\n"
7147 : "+m" (v->counter), "=qm" (c)
7148 : "ir" (i) : "memory");
7149 return c;
7150@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7151 */
7152 static inline void atomic_inc(atomic_t *v)
7153 {
7154- asm volatile(LOCK_PREFIX "incl %0"
7155+ asm volatile(LOCK_PREFIX "incl %0\n"
7156+
7157+#ifdef CONFIG_PAX_REFCOUNT
7158+ "jno 0f\n"
7159+ LOCK_PREFIX "decl %0\n"
7160+ "int $4\n0:\n"
7161+ _ASM_EXTABLE(0b, 0b)
7162+#endif
7163+
7164+ : "+m" (v->counter));
7165+}
7166+
7167+/**
7168+ * atomic_inc_unchecked - increment atomic variable
7169+ * @v: pointer of type atomic_unchecked_t
7170+ *
7171+ * Atomically increments @v by 1.
7172+ */
7173+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7174+{
7175+ asm volatile(LOCK_PREFIX "incl %0\n"
7176 : "+m" (v->counter));
7177 }
7178
7179@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7180 */
7181 static inline void atomic_dec(atomic_t *v)
7182 {
7183- asm volatile(LOCK_PREFIX "decl %0"
7184+ asm volatile(LOCK_PREFIX "decl %0\n"
7185+
7186+#ifdef CONFIG_PAX_REFCOUNT
7187+ "jno 0f\n"
7188+ LOCK_PREFIX "incl %0\n"
7189+ "int $4\n0:\n"
7190+ _ASM_EXTABLE(0b, 0b)
7191+#endif
7192+
7193+ : "+m" (v->counter));
7194+}
7195+
7196+/**
7197+ * atomic_dec_unchecked - decrement atomic variable
7198+ * @v: pointer of type atomic_unchecked_t
7199+ *
7200+ * Atomically decrements @v by 1.
7201+ */
7202+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7203+{
7204+ asm volatile(LOCK_PREFIX "decl %0\n"
7205 : "+m" (v->counter));
7206 }
7207
7208@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7209 {
7210 unsigned char c;
7211
7212- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7213+ asm volatile(LOCK_PREFIX "decl %0\n"
7214+
7215+#ifdef CONFIG_PAX_REFCOUNT
7216+ "jno 0f\n"
7217+ LOCK_PREFIX "incl %0\n"
7218+ "int $4\n0:\n"
7219+ _ASM_EXTABLE(0b, 0b)
7220+#endif
7221+
7222+ "sete %1\n"
7223 : "+m" (v->counter), "=qm" (c)
7224 : : "memory");
7225 return c != 0;
7226@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7227 {
7228 unsigned char c;
7229
7230- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7231+ asm volatile(LOCK_PREFIX "incl %0\n"
7232+
7233+#ifdef CONFIG_PAX_REFCOUNT
7234+ "jno 0f\n"
7235+ LOCK_PREFIX "decl %0\n"
7236+ "into\n0:\n"
7237+ _ASM_EXTABLE(0b, 0b)
7238+#endif
7239+
7240+ "sete %1\n"
7241+ : "+m" (v->counter), "=qm" (c)
7242+ : : "memory");
7243+ return c != 0;
7244+}
7245+
7246+/**
7247+ * atomic_inc_and_test_unchecked - increment and test
7248+ * @v: pointer of type atomic_unchecked_t
7249+ *
7250+ * Atomically increments @v by 1
7251+ * and returns true if the result is zero, or false for all
7252+ * other cases.
7253+ */
7254+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7255+{
7256+ unsigned char c;
7257+
7258+ asm volatile(LOCK_PREFIX "incl %0\n"
7259+ "sete %1\n"
7260 : "+m" (v->counter), "=qm" (c)
7261 : : "memory");
7262 return c != 0;
7263@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7264 {
7265 unsigned char c;
7266
7267- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7268+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7269+
7270+#ifdef CONFIG_PAX_REFCOUNT
7271+ "jno 0f\n"
7272+ LOCK_PREFIX "subl %2,%0\n"
7273+ "int $4\n0:\n"
7274+ _ASM_EXTABLE(0b, 0b)
7275+#endif
7276+
7277+ "sets %1\n"
7278 : "+m" (v->counter), "=qm" (c)
7279 : "ir" (i) : "memory");
7280 return c;
7281@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7282 #endif
7283 /* Modern 486+ processor */
7284 __i = i;
7285+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7286+
7287+#ifdef CONFIG_PAX_REFCOUNT
7288+ "jno 0f\n"
7289+ "movl %0, %1\n"
7290+ "int $4\n0:\n"
7291+ _ASM_EXTABLE(0b, 0b)
7292+#endif
7293+
7294+ : "+r" (i), "+m" (v->counter)
7295+ : : "memory");
7296+ return i + __i;
7297+
7298+#ifdef CONFIG_M386
7299+no_xadd: /* Legacy 386 processor */
7300+ local_irq_save(flags);
7301+ __i = atomic_read(v);
7302+ atomic_set(v, i + __i);
7303+ local_irq_restore(flags);
7304+ return i + __i;
7305+#endif
7306+}
7307+
7308+/**
7309+ * atomic_add_return_unchecked - add integer and return
7310+ * @v: pointer of type atomic_unchecked_t
7311+ * @i: integer value to add
7312+ *
7313+ * Atomically adds @i to @v and returns @i + @v
7314+ */
7315+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7316+{
7317+ int __i;
7318+#ifdef CONFIG_M386
7319+ unsigned long flags;
7320+ if (unlikely(boot_cpu_data.x86 <= 3))
7321+ goto no_xadd;
7322+#endif
7323+ /* Modern 486+ processor */
7324+ __i = i;
7325 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7326 : "+r" (i), "+m" (v->counter)
7327 : : "memory");
7328@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7329 return cmpxchg(&v->counter, old, new);
7330 }
7331
7332+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7333+{
7334+ return cmpxchg(&v->counter, old, new);
7335+}
7336+
7337 static inline int atomic_xchg(atomic_t *v, int new)
7338 {
7339 return xchg(&v->counter, new);
7340 }
7341
7342+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7343+{
7344+ return xchg(&v->counter, new);
7345+}
7346+
7347 /**
7348 * atomic_add_unless - add unless the number is already a given value
7349 * @v: pointer of type atomic_t
7350@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7351 */
7352 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7353 {
7354- int c, old;
7355+ int c, old, new;
7356 c = atomic_read(v);
7357 for (;;) {
7358- if (unlikely(c == (u)))
7359+ if (unlikely(c == u))
7360 break;
7361- old = atomic_cmpxchg((v), c, c + (a));
7362+
7363+ asm volatile("addl %2,%0\n"
7364+
7365+#ifdef CONFIG_PAX_REFCOUNT
7366+ "jno 0f\n"
7367+ "subl %2,%0\n"
7368+ "int $4\n0:\n"
7369+ _ASM_EXTABLE(0b, 0b)
7370+#endif
7371+
7372+ : "=r" (new)
7373+ : "0" (c), "ir" (a));
7374+
7375+ old = atomic_cmpxchg(v, c, new);
7376 if (likely(old == c))
7377 break;
7378 c = old;
7379 }
7380- return c != (u);
7381+ return c != u;
7382 }
7383
7384 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7385
7386 #define atomic_inc_return(v) (atomic_add_return(1, v))
7387+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7388+{
7389+ return atomic_add_return_unchecked(1, v);
7390+}
7391 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7392
7393 /* These are x86-specific, used by some header files */
7394@@ -266,9 +495,18 @@ typedef struct {
7395 u64 __aligned(8) counter;
7396 } atomic64_t;
7397
7398+#ifdef CONFIG_PAX_REFCOUNT
7399+typedef struct {
7400+ u64 __aligned(8) counter;
7401+} atomic64_unchecked_t;
7402+#else
7403+typedef atomic64_t atomic64_unchecked_t;
7404+#endif
7405+
7406 #define ATOMIC64_INIT(val) { (val) }
7407
7408 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7409+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7410
7411 /**
7412 * atomic64_xchg - xchg atomic64 variable
7413@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7414 * the old value.
7415 */
7416 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7417+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7418
7419 /**
7420 * atomic64_set - set atomic64 variable
7421@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7422 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7423
7424 /**
7425+ * atomic64_unchecked_set - set atomic64 variable
7426+ * @ptr: pointer to type atomic64_unchecked_t
7427+ * @new_val: value to assign
7428+ *
7429+ * Atomically sets the value of @ptr to @new_val.
7430+ */
7431+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7432+
7433+/**
7434 * atomic64_read - read atomic64 variable
7435 * @ptr: pointer to type atomic64_t
7436 *
7437@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7438 return res;
7439 }
7440
7441-extern u64 atomic64_read(atomic64_t *ptr);
7442+/**
7443+ * atomic64_read_unchecked - read atomic64 variable
7444+ * @ptr: pointer to type atomic64_unchecked_t
7445+ *
7446+ * Atomically reads the value of @ptr and returns it.
7447+ */
7448+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7449+{
7450+ u64 res;
7451+
7452+ /*
7453+ * Note, we inline this atomic64_unchecked_t primitive because
7454+ * it only clobbers EAX/EDX and leaves the others
7455+ * untouched. We also (somewhat subtly) rely on the
7456+ * fact that cmpxchg8b returns the current 64-bit value
7457+ * of the memory location we are touching:
7458+ */
7459+ asm volatile(
7460+ "mov %%ebx, %%eax\n\t"
7461+ "mov %%ecx, %%edx\n\t"
7462+ LOCK_PREFIX "cmpxchg8b %1\n"
7463+ : "=&A" (res)
7464+ : "m" (*ptr)
7465+ );
7466+
7467+ return res;
7468+}
7469
7470 /**
7471 * atomic64_add_return - add and return
7472@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7473 * Other variants with different arithmetic operators:
7474 */
7475 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7476+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7477 extern u64 atomic64_inc_return(atomic64_t *ptr);
7478+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7479 extern u64 atomic64_dec_return(atomic64_t *ptr);
7480+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7481
7482 /**
7483 * atomic64_add - add integer to atomic64 variable
7484@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7485 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7486
7487 /**
7488+ * atomic64_add_unchecked - add integer to atomic64 variable
7489+ * @delta: integer value to add
7490+ * @ptr: pointer to type atomic64_unchecked_t
7491+ *
7492+ * Atomically adds @delta to @ptr.
7493+ */
7494+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7495+
7496+/**
7497 * atomic64_sub - subtract the atomic64 variable
7498 * @delta: integer value to subtract
7499 * @ptr: pointer to type atomic64_t
7500@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7501 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7502
7503 /**
7504+ * atomic64_sub_unchecked - subtract the atomic64 variable
7505+ * @delta: integer value to subtract
7506+ * @ptr: pointer to type atomic64_unchecked_t
7507+ *
7508+ * Atomically subtracts @delta from @ptr.
7509+ */
7510+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7511+
7512+/**
7513 * atomic64_sub_and_test - subtract value from variable and test result
7514 * @delta: integer value to subtract
7515 * @ptr: pointer to type atomic64_t
7516@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7517 extern void atomic64_inc(atomic64_t *ptr);
7518
7519 /**
7520+ * atomic64_inc_unchecked - increment atomic64 variable
7521+ * @ptr: pointer to type atomic64_unchecked_t
7522+ *
7523+ * Atomically increments @ptr by 1.
7524+ */
7525+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7526+
7527+/**
7528 * atomic64_dec - decrement atomic64 variable
7529 * @ptr: pointer to type atomic64_t
7530 *
7531@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7532 extern void atomic64_dec(atomic64_t *ptr);
7533
7534 /**
7535+ * atomic64_dec_unchecked - decrement atomic64 variable
7536+ * @ptr: pointer to type atomic64_unchecked_t
7537+ *
7538+ * Atomically decrements @ptr by 1.
7539+ */
7540+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7541+
7542+/**
7543 * atomic64_dec_and_test - decrement and test
7544 * @ptr: pointer to type atomic64_t
7545 *
7546diff -urNp linux-2.6.32.45/arch/x86/include/asm/atomic_64.h linux-2.6.32.45/arch/x86/include/asm/atomic_64.h
7547--- linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7548+++ linux-2.6.32.45/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7549@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7550 }
7551
7552 /**
7553+ * atomic_read_unchecked - read atomic variable
7554+ * @v: pointer of type atomic_unchecked_t
7555+ *
7556+ * Atomically reads the value of @v.
7557+ */
7558+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7559+{
7560+ return v->counter;
7561+}
7562+
7563+/**
7564 * atomic_set - set atomic variable
7565 * @v: pointer of type atomic_t
7566 * @i: required value
7567@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7568 }
7569
7570 /**
7571+ * atomic_set_unchecked - set atomic variable
7572+ * @v: pointer of type atomic_unchecked_t
7573+ * @i: required value
7574+ *
7575+ * Atomically sets the value of @v to @i.
7576+ */
7577+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7578+{
7579+ v->counter = i;
7580+}
7581+
7582+/**
7583 * atomic_add - add integer to atomic variable
7584 * @i: integer value to add
7585 * @v: pointer of type atomic_t
7586@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7587 */
7588 static inline void atomic_add(int i, atomic_t *v)
7589 {
7590- asm volatile(LOCK_PREFIX "addl %1,%0"
7591+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7592+
7593+#ifdef CONFIG_PAX_REFCOUNT
7594+ "jno 0f\n"
7595+ LOCK_PREFIX "subl %1,%0\n"
7596+ "int $4\n0:\n"
7597+ _ASM_EXTABLE(0b, 0b)
7598+#endif
7599+
7600+ : "=m" (v->counter)
7601+ : "ir" (i), "m" (v->counter));
7602+}
7603+
7604+/**
7605+ * atomic_add_unchecked - add integer to atomic variable
7606+ * @i: integer value to add
7607+ * @v: pointer of type atomic_unchecked_t
7608+ *
7609+ * Atomically adds @i to @v.
7610+ */
7611+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7612+{
7613+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7614 : "=m" (v->counter)
7615 : "ir" (i), "m" (v->counter));
7616 }
7617@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7618 */
7619 static inline void atomic_sub(int i, atomic_t *v)
7620 {
7621- asm volatile(LOCK_PREFIX "subl %1,%0"
7622+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7623+
7624+#ifdef CONFIG_PAX_REFCOUNT
7625+ "jno 0f\n"
7626+ LOCK_PREFIX "addl %1,%0\n"
7627+ "int $4\n0:\n"
7628+ _ASM_EXTABLE(0b, 0b)
7629+#endif
7630+
7631+ : "=m" (v->counter)
7632+ : "ir" (i), "m" (v->counter));
7633+}
7634+
7635+/**
7636+ * atomic_sub_unchecked - subtract the atomic variable
7637+ * @i: integer value to subtract
7638+ * @v: pointer of type atomic_unchecked_t
7639+ *
7640+ * Atomically subtracts @i from @v.
7641+ */
7642+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7643+{
7644+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7645 : "=m" (v->counter)
7646 : "ir" (i), "m" (v->counter));
7647 }
7648@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7649 {
7650 unsigned char c;
7651
7652- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7653+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7654+
7655+#ifdef CONFIG_PAX_REFCOUNT
7656+ "jno 0f\n"
7657+ LOCK_PREFIX "addl %2,%0\n"
7658+ "int $4\n0:\n"
7659+ _ASM_EXTABLE(0b, 0b)
7660+#endif
7661+
7662+ "sete %1\n"
7663 : "=m" (v->counter), "=qm" (c)
7664 : "ir" (i), "m" (v->counter) : "memory");
7665 return c;
7666@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7667 */
7668 static inline void atomic_inc(atomic_t *v)
7669 {
7670- asm volatile(LOCK_PREFIX "incl %0"
7671+ asm volatile(LOCK_PREFIX "incl %0\n"
7672+
7673+#ifdef CONFIG_PAX_REFCOUNT
7674+ "jno 0f\n"
7675+ LOCK_PREFIX "decl %0\n"
7676+ "int $4\n0:\n"
7677+ _ASM_EXTABLE(0b, 0b)
7678+#endif
7679+
7680+ : "=m" (v->counter)
7681+ : "m" (v->counter));
7682+}
7683+
7684+/**
7685+ * atomic_inc_unchecked - increment atomic variable
7686+ * @v: pointer of type atomic_unchecked_t
7687+ *
7688+ * Atomically increments @v by 1.
7689+ */
7690+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7691+{
7692+ asm volatile(LOCK_PREFIX "incl %0\n"
7693 : "=m" (v->counter)
7694 : "m" (v->counter));
7695 }
7696@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7697 */
7698 static inline void atomic_dec(atomic_t *v)
7699 {
7700- asm volatile(LOCK_PREFIX "decl %0"
7701+ asm volatile(LOCK_PREFIX "decl %0\n"
7702+
7703+#ifdef CONFIG_PAX_REFCOUNT
7704+ "jno 0f\n"
7705+ LOCK_PREFIX "incl %0\n"
7706+ "int $4\n0:\n"
7707+ _ASM_EXTABLE(0b, 0b)
7708+#endif
7709+
7710+ : "=m" (v->counter)
7711+ : "m" (v->counter));
7712+}
7713+
7714+/**
7715+ * atomic_dec_unchecked - decrement atomic variable
7716+ * @v: pointer of type atomic_unchecked_t
7717+ *
7718+ * Atomically decrements @v by 1.
7719+ */
7720+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7721+{
7722+ asm volatile(LOCK_PREFIX "decl %0\n"
7723 : "=m" (v->counter)
7724 : "m" (v->counter));
7725 }
7726@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7727 {
7728 unsigned char c;
7729
7730- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7731+ asm volatile(LOCK_PREFIX "decl %0\n"
7732+
7733+#ifdef CONFIG_PAX_REFCOUNT
7734+ "jno 0f\n"
7735+ LOCK_PREFIX "incl %0\n"
7736+ "int $4\n0:\n"
7737+ _ASM_EXTABLE(0b, 0b)
7738+#endif
7739+
7740+ "sete %1\n"
7741 : "=m" (v->counter), "=qm" (c)
7742 : "m" (v->counter) : "memory");
7743 return c != 0;
7744@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7745 {
7746 unsigned char c;
7747
7748- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7749+ asm volatile(LOCK_PREFIX "incl %0\n"
7750+
7751+#ifdef CONFIG_PAX_REFCOUNT
7752+ "jno 0f\n"
7753+ LOCK_PREFIX "decl %0\n"
7754+ "int $4\n0:\n"
7755+ _ASM_EXTABLE(0b, 0b)
7756+#endif
7757+
7758+ "sete %1\n"
7759+ : "=m" (v->counter), "=qm" (c)
7760+ : "m" (v->counter) : "memory");
7761+ return c != 0;
7762+}
7763+
7764+/**
7765+ * atomic_inc_and_test_unchecked - increment and test
7766+ * @v: pointer of type atomic_unchecked_t
7767+ *
7768+ * Atomically increments @v by 1
7769+ * and returns true if the result is zero, or false for all
7770+ * other cases.
7771+ */
7772+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7773+{
7774+ unsigned char c;
7775+
7776+ asm volatile(LOCK_PREFIX "incl %0\n"
7777+ "sete %1\n"
7778 : "=m" (v->counter), "=qm" (c)
7779 : "m" (v->counter) : "memory");
7780 return c != 0;
7781@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7782 {
7783 unsigned char c;
7784
7785- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7786+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7787+
7788+#ifdef CONFIG_PAX_REFCOUNT
7789+ "jno 0f\n"
7790+ LOCK_PREFIX "subl %2,%0\n"
7791+ "int $4\n0:\n"
7792+ _ASM_EXTABLE(0b, 0b)
7793+#endif
7794+
7795+ "sets %1\n"
7796 : "=m" (v->counter), "=qm" (c)
7797 : "ir" (i), "m" (v->counter) : "memory");
7798 return c;
7799@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7800 static inline int atomic_add_return(int i, atomic_t *v)
7801 {
7802 int __i = i;
7803- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7804+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7805+
7806+#ifdef CONFIG_PAX_REFCOUNT
7807+ "jno 0f\n"
7808+ "movl %0, %1\n"
7809+ "int $4\n0:\n"
7810+ _ASM_EXTABLE(0b, 0b)
7811+#endif
7812+
7813+ : "+r" (i), "+m" (v->counter)
7814+ : : "memory");
7815+ return i + __i;
7816+}
7817+
7818+/**
7819+ * atomic_add_return_unchecked - add and return
7820+ * @i: integer value to add
7821+ * @v: pointer of type atomic_unchecked_t
7822+ *
7823+ * Atomically adds @i to @v and returns @i + @v
7824+ */
7825+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7826+{
7827+ int __i = i;
7828+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7829 : "+r" (i), "+m" (v->counter)
7830 : : "memory");
7831 return i + __i;
7832@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7833 }
7834
7835 #define atomic_inc_return(v) (atomic_add_return(1, v))
7836+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7837+{
7838+ return atomic_add_return_unchecked(1, v);
7839+}
7840 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7841
7842 /* The 64-bit atomic type */
7843@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7844 }
7845
7846 /**
7847+ * atomic64_read_unchecked - read atomic64 variable
7848+ * @v: pointer of type atomic64_unchecked_t
7849+ *
7850+ * Atomically reads the value of @v.
7851+ * Doesn't imply a read memory barrier.
7852+ */
7853+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7854+{
7855+ return v->counter;
7856+}
7857+
7858+/**
7859 * atomic64_set - set atomic64 variable
7860 * @v: pointer to type atomic64_t
7861 * @i: required value
7862@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7863 }
7864
7865 /**
7866+ * atomic64_set_unchecked - set atomic64 variable
7867+ * @v: pointer to type atomic64_unchecked_t
7868+ * @i: required value
7869+ *
7870+ * Atomically sets the value of @v to @i.
7871+ */
7872+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7873+{
7874+ v->counter = i;
7875+}
7876+
7877+/**
7878 * atomic64_add - add integer to atomic64 variable
7879 * @i: integer value to add
7880 * @v: pointer to type atomic64_t
7881@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7882 */
7883 static inline void atomic64_add(long i, atomic64_t *v)
7884 {
7885+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7886+
7887+#ifdef CONFIG_PAX_REFCOUNT
7888+ "jno 0f\n"
7889+ LOCK_PREFIX "subq %1,%0\n"
7890+ "int $4\n0:\n"
7891+ _ASM_EXTABLE(0b, 0b)
7892+#endif
7893+
7894+ : "=m" (v->counter)
7895+ : "er" (i), "m" (v->counter));
7896+}
7897+
7898+/**
7899+ * atomic64_add_unchecked - add integer to atomic64 variable
7900+ * @i: integer value to add
7901+ * @v: pointer to type atomic64_unchecked_t
7902+ *
7903+ * Atomically adds @i to @v.
7904+ */
7905+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7906+{
7907 asm volatile(LOCK_PREFIX "addq %1,%0"
7908 : "=m" (v->counter)
7909 : "er" (i), "m" (v->counter));
7910@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7911 */
7912 static inline void atomic64_sub(long i, atomic64_t *v)
7913 {
7914- asm volatile(LOCK_PREFIX "subq %1,%0"
7915+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7916+
7917+#ifdef CONFIG_PAX_REFCOUNT
7918+ "jno 0f\n"
7919+ LOCK_PREFIX "addq %1,%0\n"
7920+ "int $4\n0:\n"
7921+ _ASM_EXTABLE(0b, 0b)
7922+#endif
7923+
7924 : "=m" (v->counter)
7925 : "er" (i), "m" (v->counter));
7926 }
7927@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7928 {
7929 unsigned char c;
7930
7931- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7932+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7933+
7934+#ifdef CONFIG_PAX_REFCOUNT
7935+ "jno 0f\n"
7936+ LOCK_PREFIX "addq %2,%0\n"
7937+ "int $4\n0:\n"
7938+ _ASM_EXTABLE(0b, 0b)
7939+#endif
7940+
7941+ "sete %1\n"
7942 : "=m" (v->counter), "=qm" (c)
7943 : "er" (i), "m" (v->counter) : "memory");
7944 return c;
7945@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7946 */
7947 static inline void atomic64_inc(atomic64_t *v)
7948 {
7949+ asm volatile(LOCK_PREFIX "incq %0\n"
7950+
7951+#ifdef CONFIG_PAX_REFCOUNT
7952+ "jno 0f\n"
7953+ LOCK_PREFIX "decq %0\n"
7954+ "int $4\n0:\n"
7955+ _ASM_EXTABLE(0b, 0b)
7956+#endif
7957+
7958+ : "=m" (v->counter)
7959+ : "m" (v->counter));
7960+}
7961+
7962+/**
7963+ * atomic64_inc_unchecked - increment atomic64 variable
7964+ * @v: pointer to type atomic64_unchecked_t
7965+ *
7966+ * Atomically increments @v by 1.
7967+ */
7968+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7969+{
7970 asm volatile(LOCK_PREFIX "incq %0"
7971 : "=m" (v->counter)
7972 : "m" (v->counter));
7973@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7974 */
7975 static inline void atomic64_dec(atomic64_t *v)
7976 {
7977- asm volatile(LOCK_PREFIX "decq %0"
7978+ asm volatile(LOCK_PREFIX "decq %0\n"
7979+
7980+#ifdef CONFIG_PAX_REFCOUNT
7981+ "jno 0f\n"
7982+ LOCK_PREFIX "incq %0\n"
7983+ "int $4\n0:\n"
7984+ _ASM_EXTABLE(0b, 0b)
7985+#endif
7986+
7987+ : "=m" (v->counter)
7988+ : "m" (v->counter));
7989+}
7990+
7991+/**
7992+ * atomic64_dec_unchecked - decrement atomic64 variable
7993+ * @v: pointer to type atomic64_unchecked_t
7994+ *
7995+ * Atomically decrements @v by 1.
7996+ */
7997+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7998+{
7999+ asm volatile(LOCK_PREFIX "decq %0\n"
8000 : "=m" (v->counter)
8001 : "m" (v->counter));
8002 }
8003@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
8004 {
8005 unsigned char c;
8006
8007- asm volatile(LOCK_PREFIX "decq %0; sete %1"
8008+ asm volatile(LOCK_PREFIX "decq %0\n"
8009+
8010+#ifdef CONFIG_PAX_REFCOUNT
8011+ "jno 0f\n"
8012+ LOCK_PREFIX "incq %0\n"
8013+ "int $4\n0:\n"
8014+ _ASM_EXTABLE(0b, 0b)
8015+#endif
8016+
8017+ "sete %1\n"
8018 : "=m" (v->counter), "=qm" (c)
8019 : "m" (v->counter) : "memory");
8020 return c != 0;
8021@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
8022 {
8023 unsigned char c;
8024
8025- asm volatile(LOCK_PREFIX "incq %0; sete %1"
8026+ asm volatile(LOCK_PREFIX "incq %0\n"
8027+
8028+#ifdef CONFIG_PAX_REFCOUNT
8029+ "jno 0f\n"
8030+ LOCK_PREFIX "decq %0\n"
8031+ "int $4\n0:\n"
8032+ _ASM_EXTABLE(0b, 0b)
8033+#endif
8034+
8035+ "sete %1\n"
8036 : "=m" (v->counter), "=qm" (c)
8037 : "m" (v->counter) : "memory");
8038 return c != 0;
8039@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
8040 {
8041 unsigned char c;
8042
8043- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
8044+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
8045+
8046+#ifdef CONFIG_PAX_REFCOUNT
8047+ "jno 0f\n"
8048+ LOCK_PREFIX "subq %2,%0\n"
8049+ "int $4\n0:\n"
8050+ _ASM_EXTABLE(0b, 0b)
8051+#endif
8052+
8053+ "sets %1\n"
8054 : "=m" (v->counter), "=qm" (c)
8055 : "er" (i), "m" (v->counter) : "memory");
8056 return c;
8057@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
8058 static inline long atomic64_add_return(long i, atomic64_t *v)
8059 {
8060 long __i = i;
8061- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
8062+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
8063+
8064+#ifdef CONFIG_PAX_REFCOUNT
8065+ "jno 0f\n"
8066+ "movq %0, %1\n"
8067+ "int $4\n0:\n"
8068+ _ASM_EXTABLE(0b, 0b)
8069+#endif
8070+
8071+ : "+r" (i), "+m" (v->counter)
8072+ : : "memory");
8073+ return i + __i;
8074+}
8075+
8076+/**
8077+ * atomic64_add_return_unchecked - add and return
8078+ * @i: integer value to add
8079+ * @v: pointer to type atomic64_unchecked_t
8080+ *
8081+ * Atomically adds @i to @v and returns @i + @v
8082+ */
8083+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8084+{
8085+ long __i = i;
8086+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
8087 : "+r" (i), "+m" (v->counter)
8088 : : "memory");
8089 return i + __i;
8090@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
8091 }
8092
8093 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
8094+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8095+{
8096+ return atomic64_add_return_unchecked(1, v);
8097+}
8098 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
8099
8100 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8101@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
8102 return cmpxchg(&v->counter, old, new);
8103 }
8104
8105+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8106+{
8107+ return cmpxchg(&v->counter, old, new);
8108+}
8109+
8110 static inline long atomic64_xchg(atomic64_t *v, long new)
8111 {
8112 return xchg(&v->counter, new);
8113 }
8114
8115+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8116+{
8117+ return xchg(&v->counter, new);
8118+}
8119+
8120 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8121 {
8122 return cmpxchg(&v->counter, old, new);
8123 }
8124
8125+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8126+{
8127+ return cmpxchg(&v->counter, old, new);
8128+}
8129+
8130 static inline long atomic_xchg(atomic_t *v, int new)
8131 {
8132 return xchg(&v->counter, new);
8133 }
8134
8135+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8136+{
8137+ return xchg(&v->counter, new);
8138+}
8139+
8140 /**
8141 * atomic_add_unless - add unless the number is a given value
8142 * @v: pointer of type atomic_t
8143@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8144 */
8145 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8146 {
8147- int c, old;
8148+ int c, old, new;
8149 c = atomic_read(v);
8150 for (;;) {
8151- if (unlikely(c == (u)))
8152+ if (unlikely(c == u))
8153 break;
8154- old = atomic_cmpxchg((v), c, c + (a));
8155+
8156+ asm volatile("addl %2,%0\n"
8157+
8158+#ifdef CONFIG_PAX_REFCOUNT
8159+ "jno 0f\n"
8160+ "subl %2,%0\n"
8161+ "int $4\n0:\n"
8162+ _ASM_EXTABLE(0b, 0b)
8163+#endif
8164+
8165+ : "=r" (new)
8166+ : "0" (c), "ir" (a));
8167+
8168+ old = atomic_cmpxchg(v, c, new);
8169 if (likely(old == c))
8170 break;
8171 c = old;
8172 }
8173- return c != (u);
8174+ return c != u;
8175 }
8176
8177 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8178@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8179 */
8180 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8181 {
8182- long c, old;
8183+ long c, old, new;
8184 c = atomic64_read(v);
8185 for (;;) {
8186- if (unlikely(c == (u)))
8187+ if (unlikely(c == u))
8188 break;
8189- old = atomic64_cmpxchg((v), c, c + (a));
8190+
8191+ asm volatile("addq %2,%0\n"
8192+
8193+#ifdef CONFIG_PAX_REFCOUNT
8194+ "jno 0f\n"
8195+ "subq %2,%0\n"
8196+ "int $4\n0:\n"
8197+ _ASM_EXTABLE(0b, 0b)
8198+#endif
8199+
8200+ : "=r" (new)
8201+ : "0" (c), "er" (a));
8202+
8203+ old = atomic64_cmpxchg(v, c, new);
8204 if (likely(old == c))
8205 break;
8206 c = old;
8207 }
8208- return c != (u);
8209+ return c != u;
8210 }
8211
8212 /**
8213diff -urNp linux-2.6.32.45/arch/x86/include/asm/bitops.h linux-2.6.32.45/arch/x86/include/asm/bitops.h
8214--- linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8215+++ linux-2.6.32.45/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8216@@ -38,7 +38,7 @@
8217 * a mask operation on a byte.
8218 */
8219 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8220-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8221+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8222 #define CONST_MASK(nr) (1 << ((nr) & 7))
8223
8224 /**
8225diff -urNp linux-2.6.32.45/arch/x86/include/asm/boot.h linux-2.6.32.45/arch/x86/include/asm/boot.h
8226--- linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8227+++ linux-2.6.32.45/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8228@@ -11,10 +11,15 @@
8229 #include <asm/pgtable_types.h>
8230
8231 /* Physical address where kernel should be loaded. */
8232-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8233+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8234 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8235 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8236
8237+#ifndef __ASSEMBLY__
8238+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8239+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8240+#endif
8241+
8242 /* Minimum kernel alignment, as a power of two */
8243 #ifdef CONFIG_X86_64
8244 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8245diff -urNp linux-2.6.32.45/arch/x86/include/asm/cacheflush.h linux-2.6.32.45/arch/x86/include/asm/cacheflush.h
8246--- linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8247+++ linux-2.6.32.45/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8248@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8249 static inline unsigned long get_page_memtype(struct page *pg)
8250 {
8251 if (!PageUncached(pg) && !PageWC(pg))
8252- return -1;
8253+ return ~0UL;
8254 else if (!PageUncached(pg) && PageWC(pg))
8255 return _PAGE_CACHE_WC;
8256 else if (PageUncached(pg) && !PageWC(pg))
8257@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8258 SetPageWC(pg);
8259 break;
8260 default:
8261- case -1:
8262+ case ~0UL:
8263 ClearPageUncached(pg);
8264 ClearPageWC(pg);
8265 break;
8266diff -urNp linux-2.6.32.45/arch/x86/include/asm/cache.h linux-2.6.32.45/arch/x86/include/asm/cache.h
8267--- linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8268+++ linux-2.6.32.45/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8269@@ -5,9 +5,10 @@
8270
8271 /* L1 cache line size */
8272 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8273-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8274+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8275
8276 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8277+#define __read_only __attribute__((__section__(".data.read_only")))
8278
8279 #ifdef CONFIG_X86_VSMP
8280 /* vSMP Internode cacheline shift */
8281diff -urNp linux-2.6.32.45/arch/x86/include/asm/checksum_32.h linux-2.6.32.45/arch/x86/include/asm/checksum_32.h
8282--- linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8283+++ linux-2.6.32.45/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8284@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8285 int len, __wsum sum,
8286 int *src_err_ptr, int *dst_err_ptr);
8287
8288+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8289+ int len, __wsum sum,
8290+ int *src_err_ptr, int *dst_err_ptr);
8291+
8292+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8293+ int len, __wsum sum,
8294+ int *src_err_ptr, int *dst_err_ptr);
8295+
8296 /*
8297 * Note: when you get a NULL pointer exception here this means someone
8298 * passed in an incorrect kernel address to one of these functions.
8299@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8300 int *err_ptr)
8301 {
8302 might_sleep();
8303- return csum_partial_copy_generic((__force void *)src, dst,
8304+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8305 len, sum, err_ptr, NULL);
8306 }
8307
8308@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8309 {
8310 might_sleep();
8311 if (access_ok(VERIFY_WRITE, dst, len))
8312- return csum_partial_copy_generic(src, (__force void *)dst,
8313+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8314 len, sum, NULL, err_ptr);
8315
8316 if (len)
8317diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc_defs.h linux-2.6.32.45/arch/x86/include/asm/desc_defs.h
8318--- linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8319+++ linux-2.6.32.45/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8320@@ -31,6 +31,12 @@ struct desc_struct {
8321 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8322 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8323 };
8324+ struct {
8325+ u16 offset_low;
8326+ u16 seg;
8327+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8328+ unsigned offset_high: 16;
8329+ } gate;
8330 };
8331 } __attribute__((packed));
8332
8333diff -urNp linux-2.6.32.45/arch/x86/include/asm/desc.h linux-2.6.32.45/arch/x86/include/asm/desc.h
8334--- linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8335+++ linux-2.6.32.45/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8336@@ -4,6 +4,7 @@
8337 #include <asm/desc_defs.h>
8338 #include <asm/ldt.h>
8339 #include <asm/mmu.h>
8340+#include <asm/pgtable.h>
8341 #include <linux/smp.h>
8342
8343 static inline void fill_ldt(struct desc_struct *desc,
8344@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8345 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8346 desc->type = (info->read_exec_only ^ 1) << 1;
8347 desc->type |= info->contents << 2;
8348+ desc->type |= info->seg_not_present ^ 1;
8349 desc->s = 1;
8350 desc->dpl = 0x3;
8351 desc->p = info->seg_not_present ^ 1;
8352@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8353 }
8354
8355 extern struct desc_ptr idt_descr;
8356-extern gate_desc idt_table[];
8357-
8358-struct gdt_page {
8359- struct desc_struct gdt[GDT_ENTRIES];
8360-} __attribute__((aligned(PAGE_SIZE)));
8361-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8362+extern gate_desc idt_table[256];
8363
8364+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8365 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8366 {
8367- return per_cpu(gdt_page, cpu).gdt;
8368+ return cpu_gdt_table[cpu];
8369 }
8370
8371 #ifdef CONFIG_X86_64
8372@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8373 unsigned long base, unsigned dpl, unsigned flags,
8374 unsigned short seg)
8375 {
8376- gate->a = (seg << 16) | (base & 0xffff);
8377- gate->b = (base & 0xffff0000) |
8378- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8379+ gate->gate.offset_low = base;
8380+ gate->gate.seg = seg;
8381+ gate->gate.reserved = 0;
8382+ gate->gate.type = type;
8383+ gate->gate.s = 0;
8384+ gate->gate.dpl = dpl;
8385+ gate->gate.p = 1;
8386+ gate->gate.offset_high = base >> 16;
8387 }
8388
8389 #endif
8390@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8391 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8392 const gate_desc *gate)
8393 {
8394+ pax_open_kernel();
8395 memcpy(&idt[entry], gate, sizeof(*gate));
8396+ pax_close_kernel();
8397 }
8398
8399 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8400 const void *desc)
8401 {
8402+ pax_open_kernel();
8403 memcpy(&ldt[entry], desc, 8);
8404+ pax_close_kernel();
8405 }
8406
8407 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8408@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8409 size = sizeof(struct desc_struct);
8410 break;
8411 }
8412+
8413+ pax_open_kernel();
8414 memcpy(&gdt[entry], desc, size);
8415+ pax_close_kernel();
8416 }
8417
8418 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8419@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8420
8421 static inline void native_load_tr_desc(void)
8422 {
8423+ pax_open_kernel();
8424 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8425+ pax_close_kernel();
8426 }
8427
8428 static inline void native_load_gdt(const struct desc_ptr *dtr)
8429@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8430 unsigned int i;
8431 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8432
8433+ pax_open_kernel();
8434 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8435 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8436+ pax_close_kernel();
8437 }
8438
8439 #define _LDT_empty(info) \
8440@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8441 desc->limit = (limit >> 16) & 0xf;
8442 }
8443
8444-static inline void _set_gate(int gate, unsigned type, void *addr,
8445+static inline void _set_gate(int gate, unsigned type, const void *addr,
8446 unsigned dpl, unsigned ist, unsigned seg)
8447 {
8448 gate_desc s;
8449@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8450 * Pentium F0 0F bugfix can have resulted in the mapped
8451 * IDT being write-protected.
8452 */
8453-static inline void set_intr_gate(unsigned int n, void *addr)
8454+static inline void set_intr_gate(unsigned int n, const void *addr)
8455 {
8456 BUG_ON((unsigned)n > 0xFF);
8457 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8458@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8459 /*
8460 * This routine sets up an interrupt gate at directory privilege level 3.
8461 */
8462-static inline void set_system_intr_gate(unsigned int n, void *addr)
8463+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8464 {
8465 BUG_ON((unsigned)n > 0xFF);
8466 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8467 }
8468
8469-static inline void set_system_trap_gate(unsigned int n, void *addr)
8470+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8471 {
8472 BUG_ON((unsigned)n > 0xFF);
8473 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8474 }
8475
8476-static inline void set_trap_gate(unsigned int n, void *addr)
8477+static inline void set_trap_gate(unsigned int n, const void *addr)
8478 {
8479 BUG_ON((unsigned)n > 0xFF);
8480 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8481@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8482 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8483 {
8484 BUG_ON((unsigned)n > 0xFF);
8485- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8486+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8487 }
8488
8489-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8490+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8491 {
8492 BUG_ON((unsigned)n > 0xFF);
8493 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8494 }
8495
8496-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8497+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8498 {
8499 BUG_ON((unsigned)n > 0xFF);
8500 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8501 }
8502
8503+#ifdef CONFIG_X86_32
8504+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8505+{
8506+ struct desc_struct d;
8507+
8508+ if (likely(limit))
8509+ limit = (limit - 1UL) >> PAGE_SHIFT;
8510+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8511+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8512+}
8513+#endif
8514+
8515 #endif /* _ASM_X86_DESC_H */
8516diff -urNp linux-2.6.32.45/arch/x86/include/asm/device.h linux-2.6.32.45/arch/x86/include/asm/device.h
8517--- linux-2.6.32.45/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8518+++ linux-2.6.32.45/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8519@@ -6,7 +6,7 @@ struct dev_archdata {
8520 void *acpi_handle;
8521 #endif
8522 #ifdef CONFIG_X86_64
8523-struct dma_map_ops *dma_ops;
8524+ const struct dma_map_ops *dma_ops;
8525 #endif
8526 #ifdef CONFIG_DMAR
8527 void *iommu; /* hook for IOMMU specific extension */
8528diff -urNp linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h
8529--- linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8530+++ linux-2.6.32.45/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8531@@ -25,9 +25,9 @@ extern int iommu_merge;
8532 extern struct device x86_dma_fallback_dev;
8533 extern int panic_on_overflow;
8534
8535-extern struct dma_map_ops *dma_ops;
8536+extern const struct dma_map_ops *dma_ops;
8537
8538-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8539+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8540 {
8541 #ifdef CONFIG_X86_32
8542 return dma_ops;
8543@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8544 /* Make sure we keep the same behaviour */
8545 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8546 {
8547- struct dma_map_ops *ops = get_dma_ops(dev);
8548+ const struct dma_map_ops *ops = get_dma_ops(dev);
8549 if (ops->mapping_error)
8550 return ops->mapping_error(dev, dma_addr);
8551
8552@@ -122,7 +122,7 @@ static inline void *
8553 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8554 gfp_t gfp)
8555 {
8556- struct dma_map_ops *ops = get_dma_ops(dev);
8557+ const struct dma_map_ops *ops = get_dma_ops(dev);
8558 void *memory;
8559
8560 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8561@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8562 static inline void dma_free_coherent(struct device *dev, size_t size,
8563 void *vaddr, dma_addr_t bus)
8564 {
8565- struct dma_map_ops *ops = get_dma_ops(dev);
8566+ const struct dma_map_ops *ops = get_dma_ops(dev);
8567
8568 WARN_ON(irqs_disabled()); /* for portability */
8569
8570diff -urNp linux-2.6.32.45/arch/x86/include/asm/e820.h linux-2.6.32.45/arch/x86/include/asm/e820.h
8571--- linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8572+++ linux-2.6.32.45/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8573@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8574 #define ISA_END_ADDRESS 0x100000
8575 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8576
8577-#define BIOS_BEGIN 0x000a0000
8578+#define BIOS_BEGIN 0x000c0000
8579 #define BIOS_END 0x00100000
8580
8581 #ifdef __KERNEL__
8582diff -urNp linux-2.6.32.45/arch/x86/include/asm/elf.h linux-2.6.32.45/arch/x86/include/asm/elf.h
8583--- linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8584+++ linux-2.6.32.45/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8585@@ -257,7 +257,25 @@ extern int force_personality32;
8586 the loader. We need to make sure that it is out of the way of the program
8587 that it will "exec", and that there is sufficient room for the brk. */
8588
8589+#ifdef CONFIG_PAX_SEGMEXEC
8590+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8591+#else
8592 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8593+#endif
8594+
8595+#ifdef CONFIG_PAX_ASLR
8596+#ifdef CONFIG_X86_32
8597+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8598+
8599+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8600+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8601+#else
8602+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8603+
8604+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8605+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8606+#endif
8607+#endif
8608
8609 /* This yields a mask that user programs can use to figure out what
8610 instruction set this CPU supports. This could be done in user space,
8611@@ -311,8 +329,7 @@ do { \
8612 #define ARCH_DLINFO \
8613 do { \
8614 if (vdso_enabled) \
8615- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8616- (unsigned long)current->mm->context.vdso); \
8617+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8618 } while (0)
8619
8620 #define AT_SYSINFO 32
8621@@ -323,7 +340,7 @@ do { \
8622
8623 #endif /* !CONFIG_X86_32 */
8624
8625-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8626+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8627
8628 #define VDSO_ENTRY \
8629 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8630@@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8631 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8632 #define compat_arch_setup_additional_pages syscall32_setup_pages
8633
8634-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8635-#define arch_randomize_brk arch_randomize_brk
8636-
8637 #endif /* _ASM_X86_ELF_H */
8638diff -urNp linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h
8639--- linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8640+++ linux-2.6.32.45/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8641@@ -15,6 +15,6 @@ enum reboot_type {
8642
8643 extern enum reboot_type reboot_type;
8644
8645-extern void machine_emergency_restart(void);
8646+extern void machine_emergency_restart(void) __noreturn;
8647
8648 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8649diff -urNp linux-2.6.32.45/arch/x86/include/asm/futex.h linux-2.6.32.45/arch/x86/include/asm/futex.h
8650--- linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8651+++ linux-2.6.32.45/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8652@@ -12,16 +12,18 @@
8653 #include <asm/system.h>
8654
8655 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8656+ typecheck(u32 *, uaddr); \
8657 asm volatile("1:\t" insn "\n" \
8658 "2:\t.section .fixup,\"ax\"\n" \
8659 "3:\tmov\t%3, %1\n" \
8660 "\tjmp\t2b\n" \
8661 "\t.previous\n" \
8662 _ASM_EXTABLE(1b, 3b) \
8663- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8664+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8665 : "i" (-EFAULT), "0" (oparg), "1" (0))
8666
8667 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8668+ typecheck(u32 *, uaddr); \
8669 asm volatile("1:\tmovl %2, %0\n" \
8670 "\tmovl\t%0, %3\n" \
8671 "\t" insn "\n" \
8672@@ -34,10 +36,10 @@
8673 _ASM_EXTABLE(1b, 4b) \
8674 _ASM_EXTABLE(2b, 4b) \
8675 : "=&a" (oldval), "=&r" (ret), \
8676- "+m" (*uaddr), "=&r" (tem) \
8677+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8678 : "r" (oparg), "i" (-EFAULT), "1" (0))
8679
8680-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8681+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8682 {
8683 int op = (encoded_op >> 28) & 7;
8684 int cmp = (encoded_op >> 24) & 15;
8685@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8686
8687 switch (op) {
8688 case FUTEX_OP_SET:
8689- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8690+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8691 break;
8692 case FUTEX_OP_ADD:
8693- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8694+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8695 uaddr, oparg);
8696 break;
8697 case FUTEX_OP_OR:
8698@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8699 return ret;
8700 }
8701
8702-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8703+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8704 int newval)
8705 {
8706
8707@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8708 return -ENOSYS;
8709 #endif
8710
8711- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8712+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8713 return -EFAULT;
8714
8715- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8716+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8717 "2:\t.section .fixup, \"ax\"\n"
8718 "3:\tmov %2, %0\n"
8719 "\tjmp 2b\n"
8720 "\t.previous\n"
8721 _ASM_EXTABLE(1b, 3b)
8722- : "=a" (oldval), "+m" (*uaddr)
8723+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8724 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8725 : "memory"
8726 );
8727diff -urNp linux-2.6.32.45/arch/x86/include/asm/hw_irq.h linux-2.6.32.45/arch/x86/include/asm/hw_irq.h
8728--- linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8729+++ linux-2.6.32.45/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8730@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8731 extern void enable_IO_APIC(void);
8732
8733 /* Statistics */
8734-extern atomic_t irq_err_count;
8735-extern atomic_t irq_mis_count;
8736+extern atomic_unchecked_t irq_err_count;
8737+extern atomic_unchecked_t irq_mis_count;
8738
8739 /* EISA */
8740 extern void eisa_set_level_irq(unsigned int irq);
8741diff -urNp linux-2.6.32.45/arch/x86/include/asm/i387.h linux-2.6.32.45/arch/x86/include/asm/i387.h
8742--- linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8743+++ linux-2.6.32.45/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8744@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8745 {
8746 int err;
8747
8748+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8749+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8750+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8751+#endif
8752+
8753 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8754 "2:\n"
8755 ".section .fixup,\"ax\"\n"
8756@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8757 {
8758 int err;
8759
8760+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8761+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8762+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8763+#endif
8764+
8765 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8766 "2:\n"
8767 ".section .fixup,\"ax\"\n"
8768@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8769 }
8770
8771 /* We need a safe address that is cheap to find and that is already
8772- in L1 during context switch. The best choices are unfortunately
8773- different for UP and SMP */
8774-#ifdef CONFIG_SMP
8775-#define safe_address (__per_cpu_offset[0])
8776-#else
8777-#define safe_address (kstat_cpu(0).cpustat.user)
8778-#endif
8779+ in L1 during context switch. */
8780+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8781
8782 /*
8783 * These must be called with preempt disabled
8784@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8785 struct thread_info *me = current_thread_info();
8786 preempt_disable();
8787 if (me->status & TS_USEDFPU)
8788- __save_init_fpu(me->task);
8789+ __save_init_fpu(current);
8790 else
8791 clts();
8792 }
8793diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_32.h linux-2.6.32.45/arch/x86/include/asm/io_32.h
8794--- linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8795+++ linux-2.6.32.45/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8796@@ -3,6 +3,7 @@
8797
8798 #include <linux/string.h>
8799 #include <linux/compiler.h>
8800+#include <asm/processor.h>
8801
8802 /*
8803 * This file contains the definitions for the x86 IO instructions
8804@@ -42,6 +43,17 @@
8805
8806 #ifdef __KERNEL__
8807
8808+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8809+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8810+{
8811+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8812+}
8813+
8814+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8815+{
8816+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8817+}
8818+
8819 #include <asm-generic/iomap.h>
8820
8821 #include <linux/vmalloc.h>
8822diff -urNp linux-2.6.32.45/arch/x86/include/asm/io_64.h linux-2.6.32.45/arch/x86/include/asm/io_64.h
8823--- linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8824+++ linux-2.6.32.45/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8825@@ -140,6 +140,17 @@ __OUTS(l)
8826
8827 #include <linux/vmalloc.h>
8828
8829+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8830+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8831+{
8832+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8833+}
8834+
8835+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8836+{
8837+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8838+}
8839+
8840 #include <asm-generic/iomap.h>
8841
8842 void __memcpy_fromio(void *, unsigned long, unsigned);
8843diff -urNp linux-2.6.32.45/arch/x86/include/asm/iommu.h linux-2.6.32.45/arch/x86/include/asm/iommu.h
8844--- linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8845+++ linux-2.6.32.45/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8846@@ -3,7 +3,7 @@
8847
8848 extern void pci_iommu_shutdown(void);
8849 extern void no_iommu_init(void);
8850-extern struct dma_map_ops nommu_dma_ops;
8851+extern const struct dma_map_ops nommu_dma_ops;
8852 extern int force_iommu, no_iommu;
8853 extern int iommu_detected;
8854 extern int iommu_pass_through;
8855diff -urNp linux-2.6.32.45/arch/x86/include/asm/irqflags.h linux-2.6.32.45/arch/x86/include/asm/irqflags.h
8856--- linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8857+++ linux-2.6.32.45/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8858@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8859 sti; \
8860 sysexit
8861
8862+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8863+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8864+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8865+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8866+
8867 #else
8868 #define INTERRUPT_RETURN iret
8869 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8870diff -urNp linux-2.6.32.45/arch/x86/include/asm/kprobes.h linux-2.6.32.45/arch/x86/include/asm/kprobes.h
8871--- linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8872+++ linux-2.6.32.45/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8873@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8874 #define BREAKPOINT_INSTRUCTION 0xcc
8875 #define RELATIVEJUMP_INSTRUCTION 0xe9
8876 #define MAX_INSN_SIZE 16
8877-#define MAX_STACK_SIZE 64
8878-#define MIN_STACK_SIZE(ADDR) \
8879- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8880- THREAD_SIZE - (unsigned long)(ADDR))) \
8881- ? (MAX_STACK_SIZE) \
8882- : (((unsigned long)current_thread_info()) + \
8883- THREAD_SIZE - (unsigned long)(ADDR)))
8884+#define MAX_STACK_SIZE 64UL
8885+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8886
8887 #define flush_insn_slot(p) do { } while (0)
8888
8889diff -urNp linux-2.6.32.45/arch/x86/include/asm/kvm_host.h linux-2.6.32.45/arch/x86/include/asm/kvm_host.h
8890--- linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8891+++ linux-2.6.32.45/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8892@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8893 const struct trace_print_flags *exit_reasons_str;
8894 };
8895
8896-extern struct kvm_x86_ops *kvm_x86_ops;
8897+extern const struct kvm_x86_ops *kvm_x86_ops;
8898
8899 int kvm_mmu_module_init(void);
8900 void kvm_mmu_module_exit(void);
8901diff -urNp linux-2.6.32.45/arch/x86/include/asm/local.h linux-2.6.32.45/arch/x86/include/asm/local.h
8902--- linux-2.6.32.45/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8903+++ linux-2.6.32.45/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8904@@ -18,26 +18,58 @@ typedef struct {
8905
8906 static inline void local_inc(local_t *l)
8907 {
8908- asm volatile(_ASM_INC "%0"
8909+ asm volatile(_ASM_INC "%0\n"
8910+
8911+#ifdef CONFIG_PAX_REFCOUNT
8912+ "jno 0f\n"
8913+ _ASM_DEC "%0\n"
8914+ "int $4\n0:\n"
8915+ _ASM_EXTABLE(0b, 0b)
8916+#endif
8917+
8918 : "+m" (l->a.counter));
8919 }
8920
8921 static inline void local_dec(local_t *l)
8922 {
8923- asm volatile(_ASM_DEC "%0"
8924+ asm volatile(_ASM_DEC "%0\n"
8925+
8926+#ifdef CONFIG_PAX_REFCOUNT
8927+ "jno 0f\n"
8928+ _ASM_INC "%0\n"
8929+ "int $4\n0:\n"
8930+ _ASM_EXTABLE(0b, 0b)
8931+#endif
8932+
8933 : "+m" (l->a.counter));
8934 }
8935
8936 static inline void local_add(long i, local_t *l)
8937 {
8938- asm volatile(_ASM_ADD "%1,%0"
8939+ asm volatile(_ASM_ADD "%1,%0\n"
8940+
8941+#ifdef CONFIG_PAX_REFCOUNT
8942+ "jno 0f\n"
8943+ _ASM_SUB "%1,%0\n"
8944+ "int $4\n0:\n"
8945+ _ASM_EXTABLE(0b, 0b)
8946+#endif
8947+
8948 : "+m" (l->a.counter)
8949 : "ir" (i));
8950 }
8951
8952 static inline void local_sub(long i, local_t *l)
8953 {
8954- asm volatile(_ASM_SUB "%1,%0"
8955+ asm volatile(_ASM_SUB "%1,%0\n"
8956+
8957+#ifdef CONFIG_PAX_REFCOUNT
8958+ "jno 0f\n"
8959+ _ASM_ADD "%1,%0\n"
8960+ "int $4\n0:\n"
8961+ _ASM_EXTABLE(0b, 0b)
8962+#endif
8963+
8964 : "+m" (l->a.counter)
8965 : "ir" (i));
8966 }
8967@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8968 {
8969 unsigned char c;
8970
8971- asm volatile(_ASM_SUB "%2,%0; sete %1"
8972+ asm volatile(_ASM_SUB "%2,%0\n"
8973+
8974+#ifdef CONFIG_PAX_REFCOUNT
8975+ "jno 0f\n"
8976+ _ASM_ADD "%2,%0\n"
8977+ "int $4\n0:\n"
8978+ _ASM_EXTABLE(0b, 0b)
8979+#endif
8980+
8981+ "sete %1\n"
8982 : "+m" (l->a.counter), "=qm" (c)
8983 : "ir" (i) : "memory");
8984 return c;
8985@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8986 {
8987 unsigned char c;
8988
8989- asm volatile(_ASM_DEC "%0; sete %1"
8990+ asm volatile(_ASM_DEC "%0\n"
8991+
8992+#ifdef CONFIG_PAX_REFCOUNT
8993+ "jno 0f\n"
8994+ _ASM_INC "%0\n"
8995+ "int $4\n0:\n"
8996+ _ASM_EXTABLE(0b, 0b)
8997+#endif
8998+
8999+ "sete %1\n"
9000 : "+m" (l->a.counter), "=qm" (c)
9001 : : "memory");
9002 return c != 0;
9003@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
9004 {
9005 unsigned char c;
9006
9007- asm volatile(_ASM_INC "%0; sete %1"
9008+ asm volatile(_ASM_INC "%0\n"
9009+
9010+#ifdef CONFIG_PAX_REFCOUNT
9011+ "jno 0f\n"
9012+ _ASM_DEC "%0\n"
9013+ "int $4\n0:\n"
9014+ _ASM_EXTABLE(0b, 0b)
9015+#endif
9016+
9017+ "sete %1\n"
9018 : "+m" (l->a.counter), "=qm" (c)
9019 : : "memory");
9020 return c != 0;
9021@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
9022 {
9023 unsigned char c;
9024
9025- asm volatile(_ASM_ADD "%2,%0; sets %1"
9026+ asm volatile(_ASM_ADD "%2,%0\n"
9027+
9028+#ifdef CONFIG_PAX_REFCOUNT
9029+ "jno 0f\n"
9030+ _ASM_SUB "%2,%0\n"
9031+ "int $4\n0:\n"
9032+ _ASM_EXTABLE(0b, 0b)
9033+#endif
9034+
9035+ "sets %1\n"
9036 : "+m" (l->a.counter), "=qm" (c)
9037 : "ir" (i) : "memory");
9038 return c;
9039@@ -133,7 +201,15 @@ static inline long local_add_return(long
9040 #endif
9041 /* Modern 486+ processor */
9042 __i = i;
9043- asm volatile(_ASM_XADD "%0, %1;"
9044+ asm volatile(_ASM_XADD "%0, %1\n"
9045+
9046+#ifdef CONFIG_PAX_REFCOUNT
9047+ "jno 0f\n"
9048+ _ASM_MOV "%0,%1\n"
9049+ "int $4\n0:\n"
9050+ _ASM_EXTABLE(0b, 0b)
9051+#endif
9052+
9053 : "+r" (i), "+m" (l->a.counter)
9054 : : "memory");
9055 return i + __i;
9056diff -urNp linux-2.6.32.45/arch/x86/include/asm/microcode.h linux-2.6.32.45/arch/x86/include/asm/microcode.h
9057--- linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
9058+++ linux-2.6.32.45/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
9059@@ -12,13 +12,13 @@ struct device;
9060 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
9061
9062 struct microcode_ops {
9063- enum ucode_state (*request_microcode_user) (int cpu,
9064+ enum ucode_state (* const request_microcode_user) (int cpu,
9065 const void __user *buf, size_t size);
9066
9067- enum ucode_state (*request_microcode_fw) (int cpu,
9068+ enum ucode_state (* const request_microcode_fw) (int cpu,
9069 struct device *device);
9070
9071- void (*microcode_fini_cpu) (int cpu);
9072+ void (* const microcode_fini_cpu) (int cpu);
9073
9074 /*
9075 * The generic 'microcode_core' part guarantees that
9076@@ -38,18 +38,18 @@ struct ucode_cpu_info {
9077 extern struct ucode_cpu_info ucode_cpu_info[];
9078
9079 #ifdef CONFIG_MICROCODE_INTEL
9080-extern struct microcode_ops * __init init_intel_microcode(void);
9081+extern const struct microcode_ops * __init init_intel_microcode(void);
9082 #else
9083-static inline struct microcode_ops * __init init_intel_microcode(void)
9084+static inline const struct microcode_ops * __init init_intel_microcode(void)
9085 {
9086 return NULL;
9087 }
9088 #endif /* CONFIG_MICROCODE_INTEL */
9089
9090 #ifdef CONFIG_MICROCODE_AMD
9091-extern struct microcode_ops * __init init_amd_microcode(void);
9092+extern const struct microcode_ops * __init init_amd_microcode(void);
9093 #else
9094-static inline struct microcode_ops * __init init_amd_microcode(void)
9095+static inline const struct microcode_ops * __init init_amd_microcode(void)
9096 {
9097 return NULL;
9098 }
9099diff -urNp linux-2.6.32.45/arch/x86/include/asm/mman.h linux-2.6.32.45/arch/x86/include/asm/mman.h
9100--- linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
9101+++ linux-2.6.32.45/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
9102@@ -5,4 +5,14 @@
9103
9104 #include <asm-generic/mman.h>
9105
9106+#ifdef __KERNEL__
9107+#ifndef __ASSEMBLY__
9108+#ifdef CONFIG_X86_32
9109+#define arch_mmap_check i386_mmap_check
9110+int i386_mmap_check(unsigned long addr, unsigned long len,
9111+ unsigned long flags);
9112+#endif
9113+#endif
9114+#endif
9115+
9116 #endif /* _ASM_X86_MMAN_H */
9117diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu_context.h linux-2.6.32.45/arch/x86/include/asm/mmu_context.h
9118--- linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9119+++ linux-2.6.32.45/arch/x86/include/asm/mmu_context.h 2011-08-17 19:46:53.000000000 -0400
9120@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
9121
9122 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9123 {
9124+
9125+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9126+ unsigned int i;
9127+ pgd_t *pgd;
9128+
9129+ pax_open_kernel();
9130+ pgd = get_cpu_pgd(smp_processor_id());
9131+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9132+ if (paravirt_enabled())
9133+ set_pgd(pgd+i, native_make_pgd(0));
9134+ else
9135+ pgd[i] = native_make_pgd(0);
9136+ pax_close_kernel();
9137+#endif
9138+
9139 #ifdef CONFIG_SMP
9140 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9141 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9142@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
9143 struct task_struct *tsk)
9144 {
9145 unsigned cpu = smp_processor_id();
9146+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
9147+ int tlbstate = TLBSTATE_OK;
9148+#endif
9149
9150 if (likely(prev != next)) {
9151 #ifdef CONFIG_SMP
9152+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9153+ tlbstate = percpu_read(cpu_tlbstate.state);
9154+#endif
9155 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9156 percpu_write(cpu_tlbstate.active_mm, next);
9157 #endif
9158 cpumask_set_cpu(cpu, mm_cpumask(next));
9159
9160 /* Re-load page tables */
9161+#ifdef CONFIG_PAX_PER_CPU_PGD
9162+ pax_open_kernel();
9163+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9164+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9165+ pax_close_kernel();
9166+ load_cr3(get_cpu_pgd(cpu));
9167+#else
9168 load_cr3(next->pgd);
9169+#endif
9170
9171 /* stop flush ipis for the previous mm */
9172 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9173@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9174 */
9175 if (unlikely(prev->context.ldt != next->context.ldt))
9176 load_LDT_nolock(&next->context);
9177- }
9178+
9179+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9180+ if (!nx_enabled) {
9181+ smp_mb__before_clear_bit();
9182+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9183+ smp_mb__after_clear_bit();
9184+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9185+ }
9186+#endif
9187+
9188+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9189+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9190+ prev->context.user_cs_limit != next->context.user_cs_limit))
9191+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9192 #ifdef CONFIG_SMP
9193+ else if (unlikely(tlbstate != TLBSTATE_OK))
9194+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9195+#endif
9196+#endif
9197+
9198+ }
9199 else {
9200+
9201+#ifdef CONFIG_PAX_PER_CPU_PGD
9202+ pax_open_kernel();
9203+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9204+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9205+ pax_close_kernel();
9206+ load_cr3(get_cpu_pgd(cpu));
9207+#endif
9208+
9209+#ifdef CONFIG_SMP
9210 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9211 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9212
9213@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9214 * tlb flush IPI delivery. We must reload CR3
9215 * to make sure to use no freed page tables.
9216 */
9217+
9218+#ifndef CONFIG_PAX_PER_CPU_PGD
9219 load_cr3(next->pgd);
9220+#endif
9221+
9222 load_LDT_nolock(&next->context);
9223+
9224+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9225+ if (!nx_enabled)
9226+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9227+#endif
9228+
9229+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9230+#ifdef CONFIG_PAX_PAGEEXEC
9231+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9232+#endif
9233+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9234+#endif
9235+
9236 }
9237- }
9238 #endif
9239+ }
9240 }
9241
9242 #define activate_mm(prev, next) \
9243diff -urNp linux-2.6.32.45/arch/x86/include/asm/mmu.h linux-2.6.32.45/arch/x86/include/asm/mmu.h
9244--- linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9245+++ linux-2.6.32.45/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9246@@ -9,10 +9,23 @@
9247 * we put the segment information here.
9248 */
9249 typedef struct {
9250- void *ldt;
9251+ struct desc_struct *ldt;
9252 int size;
9253 struct mutex lock;
9254- void *vdso;
9255+ unsigned long vdso;
9256+
9257+#ifdef CONFIG_X86_32
9258+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9259+ unsigned long user_cs_base;
9260+ unsigned long user_cs_limit;
9261+
9262+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9263+ cpumask_t cpu_user_cs_mask;
9264+#endif
9265+
9266+#endif
9267+#endif
9268+
9269 } mm_context_t;
9270
9271 #ifdef CONFIG_SMP
9272diff -urNp linux-2.6.32.45/arch/x86/include/asm/module.h linux-2.6.32.45/arch/x86/include/asm/module.h
9273--- linux-2.6.32.45/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9274+++ linux-2.6.32.45/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9275@@ -5,6 +5,7 @@
9276
9277 #ifdef CONFIG_X86_64
9278 /* X86_64 does not define MODULE_PROC_FAMILY */
9279+#define MODULE_PROC_FAMILY ""
9280 #elif defined CONFIG_M386
9281 #define MODULE_PROC_FAMILY "386 "
9282 #elif defined CONFIG_M486
9283@@ -59,13 +60,36 @@
9284 #error unknown processor family
9285 #endif
9286
9287-#ifdef CONFIG_X86_32
9288-# ifdef CONFIG_4KSTACKS
9289-# define MODULE_STACKSIZE "4KSTACKS "
9290-# else
9291-# define MODULE_STACKSIZE ""
9292-# endif
9293-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9294+#ifdef CONFIG_PAX_MEMORY_UDEREF
9295+#define MODULE_PAX_UDEREF "UDEREF "
9296+#else
9297+#define MODULE_PAX_UDEREF ""
9298+#endif
9299+
9300+#ifdef CONFIG_PAX_KERNEXEC
9301+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9302+#else
9303+#define MODULE_PAX_KERNEXEC ""
9304+#endif
9305+
9306+#ifdef CONFIG_PAX_REFCOUNT
9307+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9308+#else
9309+#define MODULE_PAX_REFCOUNT ""
9310 #endif
9311
9312+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9313+#define MODULE_STACKSIZE "4KSTACKS "
9314+#else
9315+#define MODULE_STACKSIZE ""
9316+#endif
9317+
9318+#ifdef CONFIG_GRKERNSEC
9319+#define MODULE_GRSEC "GRSECURITY "
9320+#else
9321+#define MODULE_GRSEC ""
9322+#endif
9323+
9324+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9325+
9326 #endif /* _ASM_X86_MODULE_H */
9327diff -urNp linux-2.6.32.45/arch/x86/include/asm/page_64_types.h linux-2.6.32.45/arch/x86/include/asm/page_64_types.h
9328--- linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9329+++ linux-2.6.32.45/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9330@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9331
9332 /* duplicated to the one in bootmem.h */
9333 extern unsigned long max_pfn;
9334-extern unsigned long phys_base;
9335+extern const unsigned long phys_base;
9336
9337 extern unsigned long __phys_addr(unsigned long);
9338 #define __phys_reloc_hide(x) (x)
9339diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt.h linux-2.6.32.45/arch/x86/include/asm/paravirt.h
9340--- linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9341+++ linux-2.6.32.45/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9342@@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9343 pv_mmu_ops.set_fixmap(idx, phys, flags);
9344 }
9345
9346+#ifdef CONFIG_PAX_KERNEXEC
9347+static inline unsigned long pax_open_kernel(void)
9348+{
9349+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9350+}
9351+
9352+static inline unsigned long pax_close_kernel(void)
9353+{
9354+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9355+}
9356+#else
9357+static inline unsigned long pax_open_kernel(void) { return 0; }
9358+static inline unsigned long pax_close_kernel(void) { return 0; }
9359+#endif
9360+
9361 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9362
9363 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9364@@ -945,7 +960,7 @@ extern void default_banner(void);
9365
9366 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9367 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9368-#define PARA_INDIRECT(addr) *%cs:addr
9369+#define PARA_INDIRECT(addr) *%ss:addr
9370 #endif
9371
9372 #define INTERRUPT_RETURN \
9373@@ -1022,6 +1037,21 @@ extern void default_banner(void);
9374 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9375 CLBR_NONE, \
9376 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9377+
9378+#define GET_CR0_INTO_RDI \
9379+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9380+ mov %rax,%rdi
9381+
9382+#define SET_RDI_INTO_CR0 \
9383+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9384+
9385+#define GET_CR3_INTO_RDI \
9386+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9387+ mov %rax,%rdi
9388+
9389+#define SET_RDI_INTO_CR3 \
9390+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9391+
9392 #endif /* CONFIG_X86_32 */
9393
9394 #endif /* __ASSEMBLY__ */
9395diff -urNp linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h
9396--- linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9397+++ linux-2.6.32.45/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9398@@ -78,19 +78,19 @@ struct pv_init_ops {
9399 */
9400 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9401 unsigned long addr, unsigned len);
9402-};
9403+} __no_const;
9404
9405
9406 struct pv_lazy_ops {
9407 /* Set deferred update mode, used for batching operations. */
9408 void (*enter)(void);
9409 void (*leave)(void);
9410-};
9411+} __no_const;
9412
9413 struct pv_time_ops {
9414 unsigned long long (*sched_clock)(void);
9415 unsigned long (*get_tsc_khz)(void);
9416-};
9417+} __no_const;
9418
9419 struct pv_cpu_ops {
9420 /* hooks for various privileged instructions */
9421@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9422
9423 void (*start_context_switch)(struct task_struct *prev);
9424 void (*end_context_switch)(struct task_struct *next);
9425-};
9426+} __no_const;
9427
9428 struct pv_irq_ops {
9429 /*
9430@@ -217,7 +217,7 @@ struct pv_apic_ops {
9431 unsigned long start_eip,
9432 unsigned long start_esp);
9433 #endif
9434-};
9435+} __no_const;
9436
9437 struct pv_mmu_ops {
9438 unsigned long (*read_cr2)(void);
9439@@ -316,6 +316,12 @@ struct pv_mmu_ops {
9440 an mfn. We can tell which is which from the index. */
9441 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9442 phys_addr_t phys, pgprot_t flags);
9443+
9444+#ifdef CONFIG_PAX_KERNEXEC
9445+ unsigned long (*pax_open_kernel)(void);
9446+ unsigned long (*pax_close_kernel)(void);
9447+#endif
9448+
9449 };
9450
9451 struct raw_spinlock;
9452@@ -326,7 +332,7 @@ struct pv_lock_ops {
9453 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9454 int (*spin_trylock)(struct raw_spinlock *lock);
9455 void (*spin_unlock)(struct raw_spinlock *lock);
9456-};
9457+} __no_const;
9458
9459 /* This contains all the paravirt structures: we get a convenient
9460 * number for each function using the offset which we use to indicate
9461diff -urNp linux-2.6.32.45/arch/x86/include/asm/pci_x86.h linux-2.6.32.45/arch/x86/include/asm/pci_x86.h
9462--- linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9463+++ linux-2.6.32.45/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9464@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9465 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9466
9467 struct pci_raw_ops {
9468- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9469+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9470 int reg, int len, u32 *val);
9471- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9472+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9473 int reg, int len, u32 val);
9474 };
9475
9476-extern struct pci_raw_ops *raw_pci_ops;
9477-extern struct pci_raw_ops *raw_pci_ext_ops;
9478+extern const struct pci_raw_ops *raw_pci_ops;
9479+extern const struct pci_raw_ops *raw_pci_ext_ops;
9480
9481-extern struct pci_raw_ops pci_direct_conf1;
9482+extern const struct pci_raw_ops pci_direct_conf1;
9483 extern bool port_cf9_safe;
9484
9485 /* arch_initcall level */
9486diff -urNp linux-2.6.32.45/arch/x86/include/asm/percpu.h linux-2.6.32.45/arch/x86/include/asm/percpu.h
9487--- linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-03-27 14:31:47.000000000 -0400
9488+++ linux-2.6.32.45/arch/x86/include/asm/percpu.h 2011-08-17 19:33:59.000000000 -0400
9489@@ -78,6 +78,7 @@ do { \
9490 if (0) { \
9491 T__ tmp__; \
9492 tmp__ = (val); \
9493+ (void)tmp__; \
9494 } \
9495 switch (sizeof(var)) { \
9496 case 1: \
9497diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgalloc.h linux-2.6.32.45/arch/x86/include/asm/pgalloc.h
9498--- linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9499+++ linux-2.6.32.45/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9500@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9501 pmd_t *pmd, pte_t *pte)
9502 {
9503 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9504+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9505+}
9506+
9507+static inline void pmd_populate_user(struct mm_struct *mm,
9508+ pmd_t *pmd, pte_t *pte)
9509+{
9510+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9511 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9512 }
9513
9514diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h
9515--- linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9516+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9517@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9518
9519 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9520 {
9521+ pax_open_kernel();
9522 *pmdp = pmd;
9523+ pax_close_kernel();
9524 }
9525
9526 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9527diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h
9528--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9529+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9530@@ -26,9 +26,6 @@
9531 struct mm_struct;
9532 struct vm_area_struct;
9533
9534-extern pgd_t swapper_pg_dir[1024];
9535-extern pgd_t trampoline_pg_dir[1024];
9536-
9537 static inline void pgtable_cache_init(void) { }
9538 static inline void check_pgt_cache(void) { }
9539 void paging_init(void);
9540@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9541 # include <asm/pgtable-2level.h>
9542 #endif
9543
9544+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9545+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9546+#ifdef CONFIG_X86_PAE
9547+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9548+#endif
9549+
9550 #if defined(CONFIG_HIGHPTE)
9551 #define __KM_PTE \
9552 (in_nmi() ? KM_NMI_PTE : \
9553@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9554 /* Clear a kernel PTE and flush it from the TLB */
9555 #define kpte_clear_flush(ptep, vaddr) \
9556 do { \
9557+ pax_open_kernel(); \
9558 pte_clear(&init_mm, (vaddr), (ptep)); \
9559+ pax_close_kernel(); \
9560 __flush_tlb_one((vaddr)); \
9561 } while (0)
9562
9563@@ -85,6 +90,9 @@ do { \
9564
9565 #endif /* !__ASSEMBLY__ */
9566
9567+#define HAVE_ARCH_UNMAPPED_AREA
9568+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9569+
9570 /*
9571 * kern_addr_valid() is (1) for FLATMEM and (0) for
9572 * SPARSEMEM and DISCONTIGMEM
9573diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h
9574--- linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9575+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9576@@ -8,7 +8,7 @@
9577 */
9578 #ifdef CONFIG_X86_PAE
9579 # include <asm/pgtable-3level_types.h>
9580-# define PMD_SIZE (1UL << PMD_SHIFT)
9581+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9582 # define PMD_MASK (~(PMD_SIZE - 1))
9583 #else
9584 # include <asm/pgtable-2level_types.h>
9585@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9586 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9587 #endif
9588
9589+#ifdef CONFIG_PAX_KERNEXEC
9590+#ifndef __ASSEMBLY__
9591+extern unsigned char MODULES_EXEC_VADDR[];
9592+extern unsigned char MODULES_EXEC_END[];
9593+#endif
9594+#include <asm/boot.h>
9595+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9596+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9597+#else
9598+#define ktla_ktva(addr) (addr)
9599+#define ktva_ktla(addr) (addr)
9600+#endif
9601+
9602 #define MODULES_VADDR VMALLOC_START
9603 #define MODULES_END VMALLOC_END
9604 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9605diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h
9606--- linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9607+++ linux-2.6.32.45/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9608@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9609
9610 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9611 {
9612+ pax_open_kernel();
9613 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9614+ pax_close_kernel();
9615 }
9616
9617 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9618 {
9619+ pax_open_kernel();
9620 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9621+ pax_close_kernel();
9622 }
9623
9624 /*
9625diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h
9626--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9627+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9628@@ -16,10 +16,13 @@
9629
9630 extern pud_t level3_kernel_pgt[512];
9631 extern pud_t level3_ident_pgt[512];
9632+extern pud_t level3_vmalloc_pgt[512];
9633+extern pud_t level3_vmemmap_pgt[512];
9634+extern pud_t level2_vmemmap_pgt[512];
9635 extern pmd_t level2_kernel_pgt[512];
9636 extern pmd_t level2_fixmap_pgt[512];
9637-extern pmd_t level2_ident_pgt[512];
9638-extern pgd_t init_level4_pgt[];
9639+extern pmd_t level2_ident_pgt[512*2];
9640+extern pgd_t init_level4_pgt[512];
9641
9642 #define swapper_pg_dir init_level4_pgt
9643
9644@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9645
9646 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9647 {
9648+ pax_open_kernel();
9649 *pmdp = pmd;
9650+ pax_close_kernel();
9651 }
9652
9653 static inline void native_pmd_clear(pmd_t *pmd)
9654@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9655
9656 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9657 {
9658+ pax_open_kernel();
9659 *pgdp = pgd;
9660+ pax_close_kernel();
9661 }
9662
9663 static inline void native_pgd_clear(pgd_t *pgd)
9664diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h
9665--- linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9666+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9667@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9668 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9669 #define MODULES_END _AC(0xffffffffff000000, UL)
9670 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9671+#define MODULES_EXEC_VADDR MODULES_VADDR
9672+#define MODULES_EXEC_END MODULES_END
9673+
9674+#define ktla_ktva(addr) (addr)
9675+#define ktva_ktla(addr) (addr)
9676
9677 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9678diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable.h linux-2.6.32.45/arch/x86/include/asm/pgtable.h
9679--- linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9680+++ linux-2.6.32.45/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9681@@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9682
9683 #define arch_end_context_switch(prev) do {} while(0)
9684
9685+#define pax_open_kernel() native_pax_open_kernel()
9686+#define pax_close_kernel() native_pax_close_kernel()
9687 #endif /* CONFIG_PARAVIRT */
9688
9689+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9690+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9691+
9692+#ifdef CONFIG_PAX_KERNEXEC
9693+static inline unsigned long native_pax_open_kernel(void)
9694+{
9695+ unsigned long cr0;
9696+
9697+ preempt_disable();
9698+ barrier();
9699+ cr0 = read_cr0() ^ X86_CR0_WP;
9700+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9701+ write_cr0(cr0);
9702+ return cr0 ^ X86_CR0_WP;
9703+}
9704+
9705+static inline unsigned long native_pax_close_kernel(void)
9706+{
9707+ unsigned long cr0;
9708+
9709+ cr0 = read_cr0() ^ X86_CR0_WP;
9710+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9711+ write_cr0(cr0);
9712+ barrier();
9713+ preempt_enable_no_resched();
9714+ return cr0 ^ X86_CR0_WP;
9715+}
9716+#else
9717+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9718+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9719+#endif
9720+
9721 /*
9722 * The following only work if pte_present() is true.
9723 * Undefined behaviour if not..
9724 */
9725+static inline int pte_user(pte_t pte)
9726+{
9727+ return pte_val(pte) & _PAGE_USER;
9728+}
9729+
9730 static inline int pte_dirty(pte_t pte)
9731 {
9732 return pte_flags(pte) & _PAGE_DIRTY;
9733@@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9734 return pte_clear_flags(pte, _PAGE_RW);
9735 }
9736
9737+static inline pte_t pte_mkread(pte_t pte)
9738+{
9739+ return __pte(pte_val(pte) | _PAGE_USER);
9740+}
9741+
9742 static inline pte_t pte_mkexec(pte_t pte)
9743 {
9744- return pte_clear_flags(pte, _PAGE_NX);
9745+#ifdef CONFIG_X86_PAE
9746+ if (__supported_pte_mask & _PAGE_NX)
9747+ return pte_clear_flags(pte, _PAGE_NX);
9748+ else
9749+#endif
9750+ return pte_set_flags(pte, _PAGE_USER);
9751+}
9752+
9753+static inline pte_t pte_exprotect(pte_t pte)
9754+{
9755+#ifdef CONFIG_X86_PAE
9756+ if (__supported_pte_mask & _PAGE_NX)
9757+ return pte_set_flags(pte, _PAGE_NX);
9758+ else
9759+#endif
9760+ return pte_clear_flags(pte, _PAGE_USER);
9761 }
9762
9763 static inline pte_t pte_mkdirty(pte_t pte)
9764@@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9765 #endif
9766
9767 #ifndef __ASSEMBLY__
9768+
9769+#ifdef CONFIG_PAX_PER_CPU_PGD
9770+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9771+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9772+{
9773+ return cpu_pgd[cpu];
9774+}
9775+#endif
9776+
9777 #include <linux/mm_types.h>
9778
9779 static inline int pte_none(pte_t pte)
9780@@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9781
9782 static inline int pgd_bad(pgd_t pgd)
9783 {
9784- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9785+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9786 }
9787
9788 static inline int pgd_none(pgd_t pgd)
9789@@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9790 * pgd_offset() returns a (pgd_t *)
9791 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9792 */
9793-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9794+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9795+
9796+#ifdef CONFIG_PAX_PER_CPU_PGD
9797+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9798+#endif
9799+
9800 /*
9801 * a shortcut which implies the use of the kernel's pgd, instead
9802 * of a process's
9803@@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9804 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9805 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9806
9807+#ifdef CONFIG_X86_32
9808+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9809+#else
9810+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9811+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9812+
9813+#ifdef CONFIG_PAX_MEMORY_UDEREF
9814+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9815+#else
9816+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9817+#endif
9818+
9819+#endif
9820+
9821 #ifndef __ASSEMBLY__
9822
9823 extern int direct_gbpages;
9824@@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9825 * dst and src can be on the same page, but the range must not overlap,
9826 * and must not cross a page boundary.
9827 */
9828-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9829+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9830 {
9831- memcpy(dst, src, count * sizeof(pgd_t));
9832+ pax_open_kernel();
9833+ while (count--)
9834+ *dst++ = *src++;
9835+ pax_close_kernel();
9836 }
9837
9838+#ifdef CONFIG_PAX_PER_CPU_PGD
9839+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9840+#endif
9841+
9842+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9843+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9844+#else
9845+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9846+#endif
9847
9848 #include <asm-generic/pgtable.h>
9849 #endif /* __ASSEMBLY__ */
9850diff -urNp linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h
9851--- linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9852+++ linux-2.6.32.45/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9853@@ -16,12 +16,11 @@
9854 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9855 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9856 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9857-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9858+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9859 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9860 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9861 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9862-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9863-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9864+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9865 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9866
9867 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9868@@ -39,7 +38,6 @@
9869 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9870 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9871 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9872-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9873 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9874 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9875 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9876@@ -55,8 +53,10 @@
9877
9878 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9879 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9880-#else
9881+#elif defined(CONFIG_KMEMCHECK)
9882 #define _PAGE_NX (_AT(pteval_t, 0))
9883+#else
9884+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9885 #endif
9886
9887 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9888@@ -93,6 +93,9 @@
9889 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9890 _PAGE_ACCESSED)
9891
9892+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9893+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9894+
9895 #define __PAGE_KERNEL_EXEC \
9896 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9897 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9898@@ -103,8 +106,8 @@
9899 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9900 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9901 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9902-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9903-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9904+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9905+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9906 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9907 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9908 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9909@@ -163,8 +166,8 @@
9910 * bits are combined, this will alow user to access the high address mapped
9911 * VDSO in the presence of CONFIG_COMPAT_VDSO
9912 */
9913-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9914-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9915+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9916+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9917 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9918 #endif
9919
9920@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9921 {
9922 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9923 }
9924+#endif
9925
9926+#if PAGETABLE_LEVELS == 3
9927+#include <asm-generic/pgtable-nopud.h>
9928+#endif
9929+
9930+#if PAGETABLE_LEVELS == 2
9931+#include <asm-generic/pgtable-nopmd.h>
9932+#endif
9933+
9934+#ifndef __ASSEMBLY__
9935 #if PAGETABLE_LEVELS > 3
9936 typedef struct { pudval_t pud; } pud_t;
9937
9938@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9939 return pud.pud;
9940 }
9941 #else
9942-#include <asm-generic/pgtable-nopud.h>
9943-
9944 static inline pudval_t native_pud_val(pud_t pud)
9945 {
9946 return native_pgd_val(pud.pgd);
9947@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9948 return pmd.pmd;
9949 }
9950 #else
9951-#include <asm-generic/pgtable-nopmd.h>
9952-
9953 static inline pmdval_t native_pmd_val(pmd_t pmd)
9954 {
9955 return native_pgd_val(pmd.pud.pgd);
9956@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9957
9958 extern pteval_t __supported_pte_mask;
9959 extern void set_nx(void);
9960+
9961+#ifdef CONFIG_X86_32
9962+#ifdef CONFIG_X86_PAE
9963 extern int nx_enabled;
9964+#else
9965+#define nx_enabled (0)
9966+#endif
9967+#else
9968+#define nx_enabled (1)
9969+#endif
9970
9971 #define pgprot_writecombine pgprot_writecombine
9972 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9973diff -urNp linux-2.6.32.45/arch/x86/include/asm/processor.h linux-2.6.32.45/arch/x86/include/asm/processor.h
9974--- linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9975+++ linux-2.6.32.45/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9976@@ -272,7 +272,7 @@ struct tss_struct {
9977
9978 } ____cacheline_aligned;
9979
9980-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9981+extern struct tss_struct init_tss[NR_CPUS];
9982
9983 /*
9984 * Save the original ist values for checking stack pointers during debugging
9985@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9986 */
9987 #define TASK_SIZE PAGE_OFFSET
9988 #define TASK_SIZE_MAX TASK_SIZE
9989+
9990+#ifdef CONFIG_PAX_SEGMEXEC
9991+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9992+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9993+#else
9994 #define STACK_TOP TASK_SIZE
9995-#define STACK_TOP_MAX STACK_TOP
9996+#endif
9997+
9998+#define STACK_TOP_MAX TASK_SIZE
9999
10000 #define INIT_THREAD { \
10001- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10002+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10003 .vm86_info = NULL, \
10004 .sysenter_cs = __KERNEL_CS, \
10005 .io_bitmap_ptr = NULL, \
10006@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
10007 */
10008 #define INIT_TSS { \
10009 .x86_tss = { \
10010- .sp0 = sizeof(init_stack) + (long)&init_stack, \
10011+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
10012 .ss0 = __KERNEL_DS, \
10013 .ss1 = __KERNEL_CS, \
10014 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10015@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
10016 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10017
10018 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10019-#define KSTK_TOP(info) \
10020-({ \
10021- unsigned long *__ptr = (unsigned long *)(info); \
10022- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
10023-})
10024+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
10025
10026 /*
10027 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10028@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
10029 #define task_pt_regs(task) \
10030 ({ \
10031 struct pt_regs *__regs__; \
10032- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
10033+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
10034 __regs__ - 1; \
10035 })
10036
10037@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
10038 /*
10039 * User space process size. 47bits minus one guard page.
10040 */
10041-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
10042+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
10043
10044 /* This decides where the kernel will search for a free chunk of vm
10045 * space during mmap's.
10046 */
10047 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
10048- 0xc0000000 : 0xFFFFe000)
10049+ 0xc0000000 : 0xFFFFf000)
10050
10051 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
10052 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10053@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
10054 #define STACK_TOP_MAX TASK_SIZE_MAX
10055
10056 #define INIT_THREAD { \
10057- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10058+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10059 }
10060
10061 #define INIT_TSS { \
10062- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
10063+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
10064 }
10065
10066 /*
10067@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
10068 */
10069 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10070
10071+#ifdef CONFIG_PAX_SEGMEXEC
10072+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
10073+#endif
10074+
10075 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10076
10077 /* Get/set a process' ability to use the timestamp counter instruction */
10078diff -urNp linux-2.6.32.45/arch/x86/include/asm/ptrace.h linux-2.6.32.45/arch/x86/include/asm/ptrace.h
10079--- linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
10080+++ linux-2.6.32.45/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
10081@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
10082 }
10083
10084 /*
10085- * user_mode_vm(regs) determines whether a register set came from user mode.
10086+ * user_mode(regs) determines whether a register set came from user mode.
10087 * This is true if V8086 mode was enabled OR if the register set was from
10088 * protected mode with RPL-3 CS value. This tricky test checks that with
10089 * one comparison. Many places in the kernel can bypass this full check
10090- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
10091+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
10092+ * be used.
10093 */
10094-static inline int user_mode(struct pt_regs *regs)
10095+static inline int user_mode_novm(struct pt_regs *regs)
10096 {
10097 #ifdef CONFIG_X86_32
10098 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
10099 #else
10100- return !!(regs->cs & 3);
10101+ return !!(regs->cs & SEGMENT_RPL_MASK);
10102 #endif
10103 }
10104
10105-static inline int user_mode_vm(struct pt_regs *regs)
10106+static inline int user_mode(struct pt_regs *regs)
10107 {
10108 #ifdef CONFIG_X86_32
10109 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
10110 USER_RPL;
10111 #else
10112- return user_mode(regs);
10113+ return user_mode_novm(regs);
10114 #endif
10115 }
10116
10117diff -urNp linux-2.6.32.45/arch/x86/include/asm/reboot.h linux-2.6.32.45/arch/x86/include/asm/reboot.h
10118--- linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10119+++ linux-2.6.32.45/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10120@@ -6,19 +6,19 @@
10121 struct pt_regs;
10122
10123 struct machine_ops {
10124- void (*restart)(char *cmd);
10125- void (*halt)(void);
10126- void (*power_off)(void);
10127+ void (* __noreturn restart)(char *cmd);
10128+ void (* __noreturn halt)(void);
10129+ void (* __noreturn power_off)(void);
10130 void (*shutdown)(void);
10131 void (*crash_shutdown)(struct pt_regs *);
10132- void (*emergency_restart)(void);
10133-};
10134+ void (* __noreturn emergency_restart)(void);
10135+} __no_const;
10136
10137 extern struct machine_ops machine_ops;
10138
10139 void native_machine_crash_shutdown(struct pt_regs *regs);
10140 void native_machine_shutdown(void);
10141-void machine_real_restart(const unsigned char *code, int length);
10142+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10143
10144 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10145 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10146diff -urNp linux-2.6.32.45/arch/x86/include/asm/rwsem.h linux-2.6.32.45/arch/x86/include/asm/rwsem.h
10147--- linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10148+++ linux-2.6.32.45/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10149@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10150 {
10151 asm volatile("# beginning down_read\n\t"
10152 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10153+
10154+#ifdef CONFIG_PAX_REFCOUNT
10155+ "jno 0f\n"
10156+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10157+ "int $4\n0:\n"
10158+ _ASM_EXTABLE(0b, 0b)
10159+#endif
10160+
10161 /* adds 0x00000001, returns the old value */
10162 " jns 1f\n"
10163 " call call_rwsem_down_read_failed\n"
10164@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10165 "1:\n\t"
10166 " mov %1,%2\n\t"
10167 " add %3,%2\n\t"
10168+
10169+#ifdef CONFIG_PAX_REFCOUNT
10170+ "jno 0f\n"
10171+ "sub %3,%2\n"
10172+ "int $4\n0:\n"
10173+ _ASM_EXTABLE(0b, 0b)
10174+#endif
10175+
10176 " jle 2f\n\t"
10177 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10178 " jnz 1b\n\t"
10179@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10180 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10181 asm volatile("# beginning down_write\n\t"
10182 LOCK_PREFIX " xadd %1,(%2)\n\t"
10183+
10184+#ifdef CONFIG_PAX_REFCOUNT
10185+ "jno 0f\n"
10186+ "mov %1,(%2)\n"
10187+ "int $4\n0:\n"
10188+ _ASM_EXTABLE(0b, 0b)
10189+#endif
10190+
10191 /* subtract 0x0000ffff, returns the old value */
10192 " test %1,%1\n\t"
10193 /* was the count 0 before? */
10194@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10195 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10196 asm volatile("# beginning __up_read\n\t"
10197 LOCK_PREFIX " xadd %1,(%2)\n\t"
10198+
10199+#ifdef CONFIG_PAX_REFCOUNT
10200+ "jno 0f\n"
10201+ "mov %1,(%2)\n"
10202+ "int $4\n0:\n"
10203+ _ASM_EXTABLE(0b, 0b)
10204+#endif
10205+
10206 /* subtracts 1, returns the old value */
10207 " jns 1f\n\t"
10208 " call call_rwsem_wake\n"
10209@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10210 rwsem_count_t tmp;
10211 asm volatile("# beginning __up_write\n\t"
10212 LOCK_PREFIX " xadd %1,(%2)\n\t"
10213+
10214+#ifdef CONFIG_PAX_REFCOUNT
10215+ "jno 0f\n"
10216+ "mov %1,(%2)\n"
10217+ "int $4\n0:\n"
10218+ _ASM_EXTABLE(0b, 0b)
10219+#endif
10220+
10221 /* tries to transition
10222 0xffff0001 -> 0x00000000 */
10223 " jz 1f\n"
10224@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10225 {
10226 asm volatile("# beginning __downgrade_write\n\t"
10227 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10228+
10229+#ifdef CONFIG_PAX_REFCOUNT
10230+ "jno 0f\n"
10231+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10232+ "int $4\n0:\n"
10233+ _ASM_EXTABLE(0b, 0b)
10234+#endif
10235+
10236 /*
10237 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10238 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10239@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10240 static inline void rwsem_atomic_add(rwsem_count_t delta,
10241 struct rw_semaphore *sem)
10242 {
10243- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10244+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10245+
10246+#ifdef CONFIG_PAX_REFCOUNT
10247+ "jno 0f\n"
10248+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10249+ "int $4\n0:\n"
10250+ _ASM_EXTABLE(0b, 0b)
10251+#endif
10252+
10253 : "+m" (sem->count)
10254 : "er" (delta));
10255 }
10256@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10257 {
10258 rwsem_count_t tmp = delta;
10259
10260- asm volatile(LOCK_PREFIX "xadd %0,%1"
10261+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10262+
10263+#ifdef CONFIG_PAX_REFCOUNT
10264+ "jno 0f\n"
10265+ "mov %0,%1\n"
10266+ "int $4\n0:\n"
10267+ _ASM_EXTABLE(0b, 0b)
10268+#endif
10269+
10270 : "+r" (tmp), "+m" (sem->count)
10271 : : "memory");
10272
10273diff -urNp linux-2.6.32.45/arch/x86/include/asm/segment.h linux-2.6.32.45/arch/x86/include/asm/segment.h
10274--- linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10275+++ linux-2.6.32.45/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10276@@ -62,8 +62,8 @@
10277 * 26 - ESPFIX small SS
10278 * 27 - per-cpu [ offset to per-cpu data area ]
10279 * 28 - stack_canary-20 [ for stack protector ]
10280- * 29 - unused
10281- * 30 - unused
10282+ * 29 - PCI BIOS CS
10283+ * 30 - PCI BIOS DS
10284 * 31 - TSS for double fault handler
10285 */
10286 #define GDT_ENTRY_TLS_MIN 6
10287@@ -77,6 +77,8 @@
10288
10289 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10290
10291+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10292+
10293 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10294
10295 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10296@@ -88,7 +90,7 @@
10297 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10298 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10299
10300-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10301+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10302 #ifdef CONFIG_SMP
10303 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10304 #else
10305@@ -102,6 +104,12 @@
10306 #define __KERNEL_STACK_CANARY 0
10307 #endif
10308
10309+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10310+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10311+
10312+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10313+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10314+
10315 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10316
10317 /*
10318@@ -139,7 +147,7 @@
10319 */
10320
10321 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10322-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10323+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10324
10325
10326 #else
10327@@ -163,6 +171,8 @@
10328 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10329 #define __USER32_DS __USER_DS
10330
10331+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10332+
10333 #define GDT_ENTRY_TSS 8 /* needs two entries */
10334 #define GDT_ENTRY_LDT 10 /* needs two entries */
10335 #define GDT_ENTRY_TLS_MIN 12
10336@@ -183,6 +193,7 @@
10337 #endif
10338
10339 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10340+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10341 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10342 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10343 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10344diff -urNp linux-2.6.32.45/arch/x86/include/asm/smp.h linux-2.6.32.45/arch/x86/include/asm/smp.h
10345--- linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10346+++ linux-2.6.32.45/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10347@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10348 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10349 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10350 DECLARE_PER_CPU(u16, cpu_llc_id);
10351-DECLARE_PER_CPU(int, cpu_number);
10352+DECLARE_PER_CPU(unsigned int, cpu_number);
10353
10354 static inline struct cpumask *cpu_sibling_mask(int cpu)
10355 {
10356@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10357 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10358
10359 /* Static state in head.S used to set up a CPU */
10360-extern struct {
10361- void *sp;
10362- unsigned short ss;
10363-} stack_start;
10364+extern unsigned long stack_start; /* Initial stack pointer address */
10365
10366 struct smp_ops {
10367 void (*smp_prepare_boot_cpu)(void);
10368@@ -60,7 +57,7 @@ struct smp_ops {
10369
10370 void (*send_call_func_ipi)(const struct cpumask *mask);
10371 void (*send_call_func_single_ipi)(int cpu);
10372-};
10373+} __no_const;
10374
10375 /* Globals due to paravirt */
10376 extern void set_cpu_sibling_map(int cpu);
10377@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10378 extern int safe_smp_processor_id(void);
10379
10380 #elif defined(CONFIG_X86_64_SMP)
10381-#define raw_smp_processor_id() (percpu_read(cpu_number))
10382-
10383-#define stack_smp_processor_id() \
10384-({ \
10385- struct thread_info *ti; \
10386- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10387- ti->cpu; \
10388-})
10389+#define raw_smp_processor_id() (percpu_read(cpu_number))
10390+#define stack_smp_processor_id() raw_smp_processor_id()
10391 #define safe_smp_processor_id() smp_processor_id()
10392
10393 #endif
10394diff -urNp linux-2.6.32.45/arch/x86/include/asm/spinlock.h linux-2.6.32.45/arch/x86/include/asm/spinlock.h
10395--- linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10396+++ linux-2.6.32.45/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10397@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10398 static inline void __raw_read_lock(raw_rwlock_t *rw)
10399 {
10400 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10401+
10402+#ifdef CONFIG_PAX_REFCOUNT
10403+ "jno 0f\n"
10404+ LOCK_PREFIX " addl $1,(%0)\n"
10405+ "int $4\n0:\n"
10406+ _ASM_EXTABLE(0b, 0b)
10407+#endif
10408+
10409 "jns 1f\n"
10410 "call __read_lock_failed\n\t"
10411 "1:\n"
10412@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10413 static inline void __raw_write_lock(raw_rwlock_t *rw)
10414 {
10415 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10416+
10417+#ifdef CONFIG_PAX_REFCOUNT
10418+ "jno 0f\n"
10419+ LOCK_PREFIX " addl %1,(%0)\n"
10420+ "int $4\n0:\n"
10421+ _ASM_EXTABLE(0b, 0b)
10422+#endif
10423+
10424 "jz 1f\n"
10425 "call __write_lock_failed\n\t"
10426 "1:\n"
10427@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10428
10429 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10430 {
10431- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10432+ asm volatile(LOCK_PREFIX "incl %0\n"
10433+
10434+#ifdef CONFIG_PAX_REFCOUNT
10435+ "jno 0f\n"
10436+ LOCK_PREFIX "decl %0\n"
10437+ "int $4\n0:\n"
10438+ _ASM_EXTABLE(0b, 0b)
10439+#endif
10440+
10441+ :"+m" (rw->lock) : : "memory");
10442 }
10443
10444 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10445 {
10446- asm volatile(LOCK_PREFIX "addl %1, %0"
10447+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10448+
10449+#ifdef CONFIG_PAX_REFCOUNT
10450+ "jno 0f\n"
10451+ LOCK_PREFIX "subl %1, %0\n"
10452+ "int $4\n0:\n"
10453+ _ASM_EXTABLE(0b, 0b)
10454+#endif
10455+
10456 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10457 }
10458
10459diff -urNp linux-2.6.32.45/arch/x86/include/asm/stackprotector.h linux-2.6.32.45/arch/x86/include/asm/stackprotector.h
10460--- linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10461+++ linux-2.6.32.45/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10462@@ -48,7 +48,7 @@
10463 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10464 */
10465 #define GDT_STACK_CANARY_INIT \
10466- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10467+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10468
10469 /*
10470 * Initialize the stackprotector canary value.
10471@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10472
10473 static inline void load_stack_canary_segment(void)
10474 {
10475-#ifdef CONFIG_X86_32
10476+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10477 asm volatile ("mov %0, %%gs" : : "r" (0));
10478 #endif
10479 }
10480diff -urNp linux-2.6.32.45/arch/x86/include/asm/system.h linux-2.6.32.45/arch/x86/include/asm/system.h
10481--- linux-2.6.32.45/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10482+++ linux-2.6.32.45/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10483@@ -132,7 +132,7 @@ do { \
10484 "thread_return:\n\t" \
10485 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10486 __switch_canary \
10487- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10488+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10489 "movq %%rax,%%rdi\n\t" \
10490 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10491 "jnz ret_from_fork\n\t" \
10492@@ -143,7 +143,7 @@ do { \
10493 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10494 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10495 [_tif_fork] "i" (_TIF_FORK), \
10496- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10497+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10498 [current_task] "m" (per_cpu_var(current_task)) \
10499 __switch_canary_iparam \
10500 : "memory", "cc" __EXTRA_CLOBBER)
10501@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10502 {
10503 unsigned long __limit;
10504 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10505- return __limit + 1;
10506+ return __limit;
10507 }
10508
10509 static inline void native_clts(void)
10510@@ -340,12 +340,12 @@ void enable_hlt(void);
10511
10512 void cpu_idle_wait(void);
10513
10514-extern unsigned long arch_align_stack(unsigned long sp);
10515+#define arch_align_stack(x) ((x) & ~0xfUL)
10516 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10517
10518 void default_idle(void);
10519
10520-void stop_this_cpu(void *dummy);
10521+void stop_this_cpu(void *dummy) __noreturn;
10522
10523 /*
10524 * Force strict CPU ordering.
10525diff -urNp linux-2.6.32.45/arch/x86/include/asm/thread_info.h linux-2.6.32.45/arch/x86/include/asm/thread_info.h
10526--- linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10527+++ linux-2.6.32.45/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10528@@ -10,6 +10,7 @@
10529 #include <linux/compiler.h>
10530 #include <asm/page.h>
10531 #include <asm/types.h>
10532+#include <asm/percpu.h>
10533
10534 /*
10535 * low level task data that entry.S needs immediate access to
10536@@ -24,7 +25,6 @@ struct exec_domain;
10537 #include <asm/atomic.h>
10538
10539 struct thread_info {
10540- struct task_struct *task; /* main task structure */
10541 struct exec_domain *exec_domain; /* execution domain */
10542 __u32 flags; /* low level flags */
10543 __u32 status; /* thread synchronous flags */
10544@@ -34,18 +34,12 @@ struct thread_info {
10545 mm_segment_t addr_limit;
10546 struct restart_block restart_block;
10547 void __user *sysenter_return;
10548-#ifdef CONFIG_X86_32
10549- unsigned long previous_esp; /* ESP of the previous stack in
10550- case of nested (IRQ) stacks
10551- */
10552- __u8 supervisor_stack[0];
10553-#endif
10554+ unsigned long lowest_stack;
10555 int uaccess_err;
10556 };
10557
10558-#define INIT_THREAD_INFO(tsk) \
10559+#define INIT_THREAD_INFO \
10560 { \
10561- .task = &tsk, \
10562 .exec_domain = &default_exec_domain, \
10563 .flags = 0, \
10564 .cpu = 0, \
10565@@ -56,7 +50,7 @@ struct thread_info {
10566 }, \
10567 }
10568
10569-#define init_thread_info (init_thread_union.thread_info)
10570+#define init_thread_info (init_thread_union.stack)
10571 #define init_stack (init_thread_union.stack)
10572
10573 #else /* !__ASSEMBLY__ */
10574@@ -163,6 +157,23 @@ struct thread_info {
10575 #define alloc_thread_info(tsk) \
10576 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10577
10578+#ifdef __ASSEMBLY__
10579+/* how to get the thread information struct from ASM */
10580+#define GET_THREAD_INFO(reg) \
10581+ mov PER_CPU_VAR(current_tinfo), reg
10582+
10583+/* use this one if reg already contains %esp */
10584+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10585+#else
10586+/* how to get the thread information struct from C */
10587+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10588+
10589+static __always_inline struct thread_info *current_thread_info(void)
10590+{
10591+ return percpu_read_stable(current_tinfo);
10592+}
10593+#endif
10594+
10595 #ifdef CONFIG_X86_32
10596
10597 #define STACK_WARN (THREAD_SIZE/8)
10598@@ -173,35 +184,13 @@ struct thread_info {
10599 */
10600 #ifndef __ASSEMBLY__
10601
10602-
10603 /* how to get the current stack pointer from C */
10604 register unsigned long current_stack_pointer asm("esp") __used;
10605
10606-/* how to get the thread information struct from C */
10607-static inline struct thread_info *current_thread_info(void)
10608-{
10609- return (struct thread_info *)
10610- (current_stack_pointer & ~(THREAD_SIZE - 1));
10611-}
10612-
10613-#else /* !__ASSEMBLY__ */
10614-
10615-/* how to get the thread information struct from ASM */
10616-#define GET_THREAD_INFO(reg) \
10617- movl $-THREAD_SIZE, reg; \
10618- andl %esp, reg
10619-
10620-/* use this one if reg already contains %esp */
10621-#define GET_THREAD_INFO_WITH_ESP(reg) \
10622- andl $-THREAD_SIZE, reg
10623-
10624 #endif
10625
10626 #else /* X86_32 */
10627
10628-#include <asm/percpu.h>
10629-#define KERNEL_STACK_OFFSET (5*8)
10630-
10631 /*
10632 * macros/functions for gaining access to the thread information structure
10633 * preempt_count needs to be 1 initially, until the scheduler is functional.
10634@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10635 #ifndef __ASSEMBLY__
10636 DECLARE_PER_CPU(unsigned long, kernel_stack);
10637
10638-static inline struct thread_info *current_thread_info(void)
10639-{
10640- struct thread_info *ti;
10641- ti = (void *)(percpu_read_stable(kernel_stack) +
10642- KERNEL_STACK_OFFSET - THREAD_SIZE);
10643- return ti;
10644-}
10645-
10646-#else /* !__ASSEMBLY__ */
10647-
10648-/* how to get the thread information struct from ASM */
10649-#define GET_THREAD_INFO(reg) \
10650- movq PER_CPU_VAR(kernel_stack),reg ; \
10651- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10652-
10653+/* how to get the current stack pointer from C */
10654+register unsigned long current_stack_pointer asm("rsp") __used;
10655 #endif
10656
10657 #endif /* !X86_32 */
10658@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10659 extern void free_thread_info(struct thread_info *ti);
10660 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10661 #define arch_task_cache_init arch_task_cache_init
10662+
10663+#define __HAVE_THREAD_FUNCTIONS
10664+#define task_thread_info(task) (&(task)->tinfo)
10665+#define task_stack_page(task) ((task)->stack)
10666+#define setup_thread_stack(p, org) do {} while (0)
10667+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10668+
10669+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10670+extern struct task_struct *alloc_task_struct(void);
10671+extern void free_task_struct(struct task_struct *);
10672+
10673 #endif
10674 #endif /* _ASM_X86_THREAD_INFO_H */
10675diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h
10676--- linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10677+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10678@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10679 static __always_inline unsigned long __must_check
10680 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10681 {
10682+ pax_track_stack();
10683+
10684+ if ((long)n < 0)
10685+ return n;
10686+
10687 if (__builtin_constant_p(n)) {
10688 unsigned long ret;
10689
10690@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10691 return ret;
10692 }
10693 }
10694+ if (!__builtin_constant_p(n))
10695+ check_object_size(from, n, true);
10696 return __copy_to_user_ll(to, from, n);
10697 }
10698
10699@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10700 __copy_to_user(void __user *to, const void *from, unsigned long n)
10701 {
10702 might_fault();
10703+
10704 return __copy_to_user_inatomic(to, from, n);
10705 }
10706
10707 static __always_inline unsigned long
10708 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10709 {
10710+ if ((long)n < 0)
10711+ return n;
10712+
10713 /* Avoid zeroing the tail if the copy fails..
10714 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10715 * but as the zeroing behaviour is only significant when n is not
10716@@ -138,6 +149,12 @@ static __always_inline unsigned long
10717 __copy_from_user(void *to, const void __user *from, unsigned long n)
10718 {
10719 might_fault();
10720+
10721+ pax_track_stack();
10722+
10723+ if ((long)n < 0)
10724+ return n;
10725+
10726 if (__builtin_constant_p(n)) {
10727 unsigned long ret;
10728
10729@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10730 return ret;
10731 }
10732 }
10733+ if (!__builtin_constant_p(n))
10734+ check_object_size(to, n, false);
10735 return __copy_from_user_ll(to, from, n);
10736 }
10737
10738@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10739 const void __user *from, unsigned long n)
10740 {
10741 might_fault();
10742+
10743+ if ((long)n < 0)
10744+ return n;
10745+
10746 if (__builtin_constant_p(n)) {
10747 unsigned long ret;
10748
10749@@ -182,14 +205,62 @@ static __always_inline unsigned long
10750 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10751 unsigned long n)
10752 {
10753- return __copy_from_user_ll_nocache_nozero(to, from, n);
10754+ if ((long)n < 0)
10755+ return n;
10756+
10757+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10758+}
10759+
10760+/**
10761+ * copy_to_user: - Copy a block of data into user space.
10762+ * @to: Destination address, in user space.
10763+ * @from: Source address, in kernel space.
10764+ * @n: Number of bytes to copy.
10765+ *
10766+ * Context: User context only. This function may sleep.
10767+ *
10768+ * Copy data from kernel space to user space.
10769+ *
10770+ * Returns number of bytes that could not be copied.
10771+ * On success, this will be zero.
10772+ */
10773+static __always_inline unsigned long __must_check
10774+copy_to_user(void __user *to, const void *from, unsigned long n)
10775+{
10776+ if (access_ok(VERIFY_WRITE, to, n))
10777+ n = __copy_to_user(to, from, n);
10778+ return n;
10779+}
10780+
10781+/**
10782+ * copy_from_user: - Copy a block of data from user space.
10783+ * @to: Destination address, in kernel space.
10784+ * @from: Source address, in user space.
10785+ * @n: Number of bytes to copy.
10786+ *
10787+ * Context: User context only. This function may sleep.
10788+ *
10789+ * Copy data from user space to kernel space.
10790+ *
10791+ * Returns number of bytes that could not be copied.
10792+ * On success, this will be zero.
10793+ *
10794+ * If some data could not be copied, this function will pad the copied
10795+ * data to the requested size using zero bytes.
10796+ */
10797+static __always_inline unsigned long __must_check
10798+copy_from_user(void *to, const void __user *from, unsigned long n)
10799+{
10800+ if (access_ok(VERIFY_READ, from, n))
10801+ n = __copy_from_user(to, from, n);
10802+ else if ((long)n > 0) {
10803+ if (!__builtin_constant_p(n))
10804+ check_object_size(to, n, false);
10805+ memset(to, 0, n);
10806+ }
10807+ return n;
10808 }
10809
10810-unsigned long __must_check copy_to_user(void __user *to,
10811- const void *from, unsigned long n);
10812-unsigned long __must_check copy_from_user(void *to,
10813- const void __user *from,
10814- unsigned long n);
10815 long __must_check strncpy_from_user(char *dst, const char __user *src,
10816 long count);
10817 long __must_check __strncpy_from_user(char *dst,
10818diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h
10819--- linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10820+++ linux-2.6.32.45/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10821@@ -9,6 +9,9 @@
10822 #include <linux/prefetch.h>
10823 #include <linux/lockdep.h>
10824 #include <asm/page.h>
10825+#include <asm/pgtable.h>
10826+
10827+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10828
10829 /*
10830 * Copy To/From Userspace
10831@@ -19,113 +22,203 @@ __must_check unsigned long
10832 copy_user_generic(void *to, const void *from, unsigned len);
10833
10834 __must_check unsigned long
10835-copy_to_user(void __user *to, const void *from, unsigned len);
10836-__must_check unsigned long
10837-copy_from_user(void *to, const void __user *from, unsigned len);
10838-__must_check unsigned long
10839 copy_in_user(void __user *to, const void __user *from, unsigned len);
10840
10841 static __always_inline __must_check
10842-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10843+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10844 {
10845- int ret = 0;
10846+ unsigned ret = 0;
10847
10848 might_fault();
10849- if (!__builtin_constant_p(size))
10850- return copy_user_generic(dst, (__force void *)src, size);
10851+
10852+ if ((int)size < 0)
10853+ return size;
10854+
10855+#ifdef CONFIG_PAX_MEMORY_UDEREF
10856+ if (!__access_ok(VERIFY_READ, src, size))
10857+ return size;
10858+#endif
10859+
10860+ if (!__builtin_constant_p(size)) {
10861+ check_object_size(dst, size, false);
10862+
10863+#ifdef CONFIG_PAX_MEMORY_UDEREF
10864+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10865+ src += PAX_USER_SHADOW_BASE;
10866+#endif
10867+
10868+ return copy_user_generic(dst, (__force const void *)src, size);
10869+ }
10870 switch (size) {
10871- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10872+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10873 ret, "b", "b", "=q", 1);
10874 return ret;
10875- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10876+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10877 ret, "w", "w", "=r", 2);
10878 return ret;
10879- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10880+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10881 ret, "l", "k", "=r", 4);
10882 return ret;
10883- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10884+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10885 ret, "q", "", "=r", 8);
10886 return ret;
10887 case 10:
10888- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10889+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10890 ret, "q", "", "=r", 10);
10891 if (unlikely(ret))
10892 return ret;
10893 __get_user_asm(*(u16 *)(8 + (char *)dst),
10894- (u16 __user *)(8 + (char __user *)src),
10895+ (const u16 __user *)(8 + (const char __user *)src),
10896 ret, "w", "w", "=r", 2);
10897 return ret;
10898 case 16:
10899- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10900+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10901 ret, "q", "", "=r", 16);
10902 if (unlikely(ret))
10903 return ret;
10904 __get_user_asm(*(u64 *)(8 + (char *)dst),
10905- (u64 __user *)(8 + (char __user *)src),
10906+ (const u64 __user *)(8 + (const char __user *)src),
10907 ret, "q", "", "=r", 8);
10908 return ret;
10909 default:
10910- return copy_user_generic(dst, (__force void *)src, size);
10911+
10912+#ifdef CONFIG_PAX_MEMORY_UDEREF
10913+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10914+ src += PAX_USER_SHADOW_BASE;
10915+#endif
10916+
10917+ return copy_user_generic(dst, (__force const void *)src, size);
10918 }
10919 }
10920
10921 static __always_inline __must_check
10922-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10923+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10924 {
10925- int ret = 0;
10926+ unsigned ret = 0;
10927
10928 might_fault();
10929- if (!__builtin_constant_p(size))
10930+
10931+ pax_track_stack();
10932+
10933+ if ((int)size < 0)
10934+ return size;
10935+
10936+#ifdef CONFIG_PAX_MEMORY_UDEREF
10937+ if (!__access_ok(VERIFY_WRITE, dst, size))
10938+ return size;
10939+#endif
10940+
10941+ if (!__builtin_constant_p(size)) {
10942+ check_object_size(src, size, true);
10943+
10944+#ifdef CONFIG_PAX_MEMORY_UDEREF
10945+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10946+ dst += PAX_USER_SHADOW_BASE;
10947+#endif
10948+
10949 return copy_user_generic((__force void *)dst, src, size);
10950+ }
10951 switch (size) {
10952- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10953+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10954 ret, "b", "b", "iq", 1);
10955 return ret;
10956- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10957+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10958 ret, "w", "w", "ir", 2);
10959 return ret;
10960- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10961+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10962 ret, "l", "k", "ir", 4);
10963 return ret;
10964- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10965+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10966 ret, "q", "", "er", 8);
10967 return ret;
10968 case 10:
10969- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10970+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10971 ret, "q", "", "er", 10);
10972 if (unlikely(ret))
10973 return ret;
10974 asm("":::"memory");
10975- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10976+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10977 ret, "w", "w", "ir", 2);
10978 return ret;
10979 case 16:
10980- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10981+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10982 ret, "q", "", "er", 16);
10983 if (unlikely(ret))
10984 return ret;
10985 asm("":::"memory");
10986- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10987+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10988 ret, "q", "", "er", 8);
10989 return ret;
10990 default:
10991+
10992+#ifdef CONFIG_PAX_MEMORY_UDEREF
10993+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10994+ dst += PAX_USER_SHADOW_BASE;
10995+#endif
10996+
10997 return copy_user_generic((__force void *)dst, src, size);
10998 }
10999 }
11000
11001 static __always_inline __must_check
11002-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11003+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
11004+{
11005+ if (access_ok(VERIFY_WRITE, to, len))
11006+ len = __copy_to_user(to, from, len);
11007+ return len;
11008+}
11009+
11010+static __always_inline __must_check
11011+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
11012+{
11013+ if ((int)len < 0)
11014+ return len;
11015+
11016+ if (access_ok(VERIFY_READ, from, len))
11017+ len = __copy_from_user(to, from, len);
11018+ else if ((int)len > 0) {
11019+ if (!__builtin_constant_p(len))
11020+ check_object_size(to, len, false);
11021+ memset(to, 0, len);
11022+ }
11023+ return len;
11024+}
11025+
11026+static __always_inline __must_check
11027+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11028 {
11029- int ret = 0;
11030+ unsigned ret = 0;
11031
11032 might_fault();
11033- if (!__builtin_constant_p(size))
11034+
11035+ pax_track_stack();
11036+
11037+ if ((int)size < 0)
11038+ return size;
11039+
11040+#ifdef CONFIG_PAX_MEMORY_UDEREF
11041+ if (!__access_ok(VERIFY_READ, src, size))
11042+ return size;
11043+ if (!__access_ok(VERIFY_WRITE, dst, size))
11044+ return size;
11045+#endif
11046+
11047+ if (!__builtin_constant_p(size)) {
11048+
11049+#ifdef CONFIG_PAX_MEMORY_UDEREF
11050+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11051+ src += PAX_USER_SHADOW_BASE;
11052+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11053+ dst += PAX_USER_SHADOW_BASE;
11054+#endif
11055+
11056 return copy_user_generic((__force void *)dst,
11057- (__force void *)src, size);
11058+ (__force const void *)src, size);
11059+ }
11060 switch (size) {
11061 case 1: {
11062 u8 tmp;
11063- __get_user_asm(tmp, (u8 __user *)src,
11064+ __get_user_asm(tmp, (const u8 __user *)src,
11065 ret, "b", "b", "=q", 1);
11066 if (likely(!ret))
11067 __put_user_asm(tmp, (u8 __user *)dst,
11068@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
11069 }
11070 case 2: {
11071 u16 tmp;
11072- __get_user_asm(tmp, (u16 __user *)src,
11073+ __get_user_asm(tmp, (const u16 __user *)src,
11074 ret, "w", "w", "=r", 2);
11075 if (likely(!ret))
11076 __put_user_asm(tmp, (u16 __user *)dst,
11077@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
11078
11079 case 4: {
11080 u32 tmp;
11081- __get_user_asm(tmp, (u32 __user *)src,
11082+ __get_user_asm(tmp, (const u32 __user *)src,
11083 ret, "l", "k", "=r", 4);
11084 if (likely(!ret))
11085 __put_user_asm(tmp, (u32 __user *)dst,
11086@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
11087 }
11088 case 8: {
11089 u64 tmp;
11090- __get_user_asm(tmp, (u64 __user *)src,
11091+ __get_user_asm(tmp, (const u64 __user *)src,
11092 ret, "q", "", "=r", 8);
11093 if (likely(!ret))
11094 __put_user_asm(tmp, (u64 __user *)dst,
11095@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
11096 return ret;
11097 }
11098 default:
11099+
11100+#ifdef CONFIG_PAX_MEMORY_UDEREF
11101+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11102+ src += PAX_USER_SHADOW_BASE;
11103+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11104+ dst += PAX_USER_SHADOW_BASE;
11105+#endif
11106+
11107 return copy_user_generic((__force void *)dst,
11108- (__force void *)src, size);
11109+ (__force const void *)src, size);
11110 }
11111 }
11112
11113@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
11114 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11115 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11116
11117-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11118- unsigned size);
11119+static __must_check __always_inline unsigned long
11120+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11121+{
11122+ pax_track_stack();
11123+
11124+ if ((int)size < 0)
11125+ return size;
11126
11127-static __must_check __always_inline int
11128+#ifdef CONFIG_PAX_MEMORY_UDEREF
11129+ if (!__access_ok(VERIFY_READ, src, size))
11130+ return size;
11131+
11132+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11133+ src += PAX_USER_SHADOW_BASE;
11134+#endif
11135+
11136+ return copy_user_generic(dst, (__force const void *)src, size);
11137+}
11138+
11139+static __must_check __always_inline unsigned long
11140 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11141 {
11142+ if ((int)size < 0)
11143+ return size;
11144+
11145+#ifdef CONFIG_PAX_MEMORY_UDEREF
11146+ if (!__access_ok(VERIFY_WRITE, dst, size))
11147+ return size;
11148+
11149+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11150+ dst += PAX_USER_SHADOW_BASE;
11151+#endif
11152+
11153 return copy_user_generic((__force void *)dst, src, size);
11154 }
11155
11156-extern long __copy_user_nocache(void *dst, const void __user *src,
11157+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11158 unsigned size, int zerorest);
11159
11160-static inline int
11161-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11162+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11163 {
11164 might_sleep();
11165+
11166+ if ((int)size < 0)
11167+ return size;
11168+
11169+#ifdef CONFIG_PAX_MEMORY_UDEREF
11170+ if (!__access_ok(VERIFY_READ, src, size))
11171+ return size;
11172+#endif
11173+
11174 return __copy_user_nocache(dst, src, size, 1);
11175 }
11176
11177-static inline int
11178-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11179+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11180 unsigned size)
11181 {
11182+ if ((int)size < 0)
11183+ return size;
11184+
11185+#ifdef CONFIG_PAX_MEMORY_UDEREF
11186+ if (!__access_ok(VERIFY_READ, src, size))
11187+ return size;
11188+#endif
11189+
11190 return __copy_user_nocache(dst, src, size, 0);
11191 }
11192
11193-unsigned long
11194+extern unsigned long
11195 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11196
11197 #endif /* _ASM_X86_UACCESS_64_H */
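
The x86_64 uaccess hunks above all follow one pattern: the copy helpers now return unsigned long instead of int, bail out early when the size would be negative as a signed value, verify the range with __access_ok() under CONFIG_PAX_MEMORY_UDEREF, and rebase any pointer still below PAX_USER_SHADOW_BASE into the shadowed user alias before handing it to copy_user_generic(). The stand-alone C sketch below shows only that last rebasing step; SHADOW_BASE, shadow_adjust() and the sample address are invented for the example and are not the kernel's constants.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for PAX_USER_SHADOW_BASE; the real value is an
 * architecture-specific kernel constant, not this number. */
#define SHADOW_BASE 0x100000000000ULL

/* Mirrors the UDEREF adjustment in the patched __copy_from_user(): a
 * pointer still carrying its low userland address is rebased into the
 * shadow alias before the generic copy routine dereferences it. */
static uint64_t shadow_adjust(uint64_t user_ptr)
{
	if (user_ptr < SHADOW_BASE)
		user_ptr += SHADOW_BASE;
	return user_ptr;
}

int main(void)
{
	uint64_t p = 0x00007f0000001000ULL;	/* fake userland address */

	printf("raw:      %#llx\n", (unsigned long long)p);
	printf("adjusted: %#llx\n", (unsigned long long)shadow_adjust(p));
	return 0;
}
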
11198diff -urNp linux-2.6.32.45/arch/x86/include/asm/uaccess.h linux-2.6.32.45/arch/x86/include/asm/uaccess.h
11199--- linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11200+++ linux-2.6.32.45/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11201@@ -8,12 +8,15 @@
11202 #include <linux/thread_info.h>
11203 #include <linux/prefetch.h>
11204 #include <linux/string.h>
11205+#include <linux/sched.h>
11206 #include <asm/asm.h>
11207 #include <asm/page.h>
11208
11209 #define VERIFY_READ 0
11210 #define VERIFY_WRITE 1
11211
11212+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11213+
11214 /*
11215 * The fs value determines whether argument validity checking should be
11216 * performed or not. If get_fs() == USER_DS, checking is performed, with
11217@@ -29,7 +32,12 @@
11218
11219 #define get_ds() (KERNEL_DS)
11220 #define get_fs() (current_thread_info()->addr_limit)
11221+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11222+void __set_fs(mm_segment_t x);
11223+void set_fs(mm_segment_t x);
11224+#else
11225 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11226+#endif
11227
11228 #define segment_eq(a, b) ((a).seg == (b).seg)
11229
11230@@ -77,7 +85,33 @@
11231 * checks that the pointer is in the user space range - after calling
11232 * this function, memory access functions may still return -EFAULT.
11233 */
11234-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11235+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11236+#define access_ok(type, addr, size) \
11237+({ \
11238+ long __size = size; \
11239+ unsigned long __addr = (unsigned long)addr; \
11240+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11241+ unsigned long __end_ao = __addr + __size - 1; \
11242+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11243+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11244+ while(__addr_ao <= __end_ao) { \
11245+ char __c_ao; \
11246+ __addr_ao += PAGE_SIZE; \
11247+ if (__size > PAGE_SIZE) \
11248+ cond_resched(); \
11249+ if (__get_user(__c_ao, (char __user *)__addr)) \
11250+ break; \
11251+ if (type != VERIFY_WRITE) { \
11252+ __addr = __addr_ao; \
11253+ continue; \
11254+ } \
11255+ if (__put_user(__c_ao, (char __user *)__addr)) \
11256+ break; \
11257+ __addr = __addr_ao; \
11258+ } \
11259+ } \
11260+ __ret_ao; \
11261+})
11262
11263 /*
11264 * The exception table consists of pairs of addresses: the first is the
11265@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11266 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11267 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11268
11269-
11270+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11271+#define __copyuser_seg "gs;"
11272+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11273+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11274+#else
11275+#define __copyuser_seg
11276+#define __COPYUSER_SET_ES
11277+#define __COPYUSER_RESTORE_ES
11278+#endif
11279
11280 #ifdef CONFIG_X86_32
11281 #define __put_user_asm_u64(x, addr, err, errret) \
11282- asm volatile("1: movl %%eax,0(%2)\n" \
11283- "2: movl %%edx,4(%2)\n" \
11284+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11285+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11286 "3:\n" \
11287 ".section .fixup,\"ax\"\n" \
11288 "4: movl %3,%0\n" \
11289@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11290 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11291
11292 #define __put_user_asm_ex_u64(x, addr) \
11293- asm volatile("1: movl %%eax,0(%1)\n" \
11294- "2: movl %%edx,4(%1)\n" \
11295+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11296+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11297 "3:\n" \
11298 _ASM_EXTABLE(1b, 2b - 1b) \
11299 _ASM_EXTABLE(2b, 3b - 2b) \
11300@@ -374,7 +416,7 @@ do { \
11301 } while (0)
11302
11303 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11304- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11305+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11306 "2:\n" \
11307 ".section .fixup,\"ax\"\n" \
11308 "3: mov %3,%0\n" \
11309@@ -382,7 +424,7 @@ do { \
11310 " jmp 2b\n" \
11311 ".previous\n" \
11312 _ASM_EXTABLE(1b, 3b) \
11313- : "=r" (err), ltype(x) \
11314+ : "=r" (err), ltype (x) \
11315 : "m" (__m(addr)), "i" (errret), "0" (err))
11316
11317 #define __get_user_size_ex(x, ptr, size) \
11318@@ -407,7 +449,7 @@ do { \
11319 } while (0)
11320
11321 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11322- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11323+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11324 "2:\n" \
11325 _ASM_EXTABLE(1b, 2b - 1b) \
11326 : ltype(x) : "m" (__m(addr)))
11327@@ -424,13 +466,24 @@ do { \
11328 int __gu_err; \
11329 unsigned long __gu_val; \
11330 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11331- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11332+ (x) = (__typeof__(*(ptr)))__gu_val; \
11333 __gu_err; \
11334 })
11335
11336 /* FIXME: this hack is definitely wrong -AK */
11337 struct __large_struct { unsigned long buf[100]; };
11338-#define __m(x) (*(struct __large_struct __user *)(x))
11339+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11340+#define ____m(x) \
11341+({ \
11342+ unsigned long ____x = (unsigned long)(x); \
11343+ if (____x < PAX_USER_SHADOW_BASE) \
11344+ ____x += PAX_USER_SHADOW_BASE; \
11345+ (void __user *)____x; \
11346+})
11347+#else
11348+#define ____m(x) (x)
11349+#endif
11350+#define __m(x) (*(struct __large_struct __user *)____m(x))
11351
11352 /*
11353 * Tell gcc we read from memory instead of writing: this is because
11354@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11355 * aliasing issues.
11356 */
11357 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11358- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11359+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11360 "2:\n" \
11361 ".section .fixup,\"ax\"\n" \
11362 "3: mov %3,%0\n" \
11363@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11364 ".previous\n" \
11365 _ASM_EXTABLE(1b, 3b) \
11366 : "=r"(err) \
11367- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11368+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11369
11370 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11371- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11372+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11373 "2:\n" \
11374 _ASM_EXTABLE(1b, 2b - 1b) \
11375 : : ltype(x), "m" (__m(addr)))
11376@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11377 * On error, the variable @x is set to zero.
11378 */
11379
11380+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11381+#define __get_user(x, ptr) get_user((x), (ptr))
11382+#else
11383 #define __get_user(x, ptr) \
11384 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11385+#endif
11386
11387 /**
11388 * __put_user: - Write a simple value into user space, with less checking.
11389@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11390 * Returns zero on success, or -EFAULT on error.
11391 */
11392
11393+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11394+#define __put_user(x, ptr) put_user((x), (ptr))
11395+#else
11396 #define __put_user(x, ptr) \
11397 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11398+#endif
11399
11400 #define __get_user_unaligned __get_user
11401 #define __put_user_unaligned __put_user
11402@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11403 #define get_user_ex(x, ptr) do { \
11404 unsigned long __gue_val; \
11405 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11406- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11407+ (x) = (__typeof__(*(ptr)))__gue_val; \
11408 } while (0)
11409
11410 #ifdef CONFIG_X86_WP_WORKS_OK
11411@@ -567,6 +628,7 @@ extern struct movsl_mask {
11412
11413 #define ARCH_HAS_NOCACHE_UACCESS 1
11414
11415+#define ARCH_HAS_SORT_EXTABLE
11416 #ifdef CONFIG_X86_32
11417 # include "uaccess_32.h"
11418 #else
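
The replacement access_ok() above keeps the old range check in __access_ok() but adds a pre-faulting walk: when the checked range crosses a page boundary it reads one byte per page with __get_user() (and writes it back with __put_user() for VERIFY_WRITE), calling cond_resched() on large ranges, so that every page is present before the copy routines run. Below is a small user-space model of which addresses that walk touches; PAGE_SIZE and probe_range() are local stand-ins, and the probing is reduced to printf() for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Model of the probing walk added to access_ok(): for a range that
 * crosses a page boundary, one byte is touched in every page the range
 * covers, first at the start address itself and then at the start of
 * each following page, so the pages are faulted in before the real
 * copy begins.  The kernel version does this with __get_user() and
 * __put_user(); here we only print the addresses that would be probed. */
static void probe_range(unsigned long addr, unsigned long size)
{
	unsigned long end   = addr + size - 1;
	unsigned long page  = addr & PAGE_MASK;
	unsigned long probe = addr;

	if (((end ^ page) & PAGE_MASK) == 0)
		return;			/* fits in one page: no walk needed */

	while (page <= end) {
		printf("would probe byte at %#lx\n", probe);
		page += PAGE_SIZE;
		probe = page;
	}
}

int main(void)
{
	probe_range(0x1000f00UL, 0x300UL);	/* crosses one page boundary */
	return 0;
}
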
11419diff -urNp linux-2.6.32.45/arch/x86/include/asm/vgtod.h linux-2.6.32.45/arch/x86/include/asm/vgtod.h
11420--- linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11421+++ linux-2.6.32.45/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11422@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11423 int sysctl_enabled;
11424 struct timezone sys_tz;
11425 struct { /* extract of a clocksource struct */
11426+ char name[8];
11427 cycle_t (*vread)(void);
11428 cycle_t cycle_last;
11429 cycle_t mask;
11430diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi.h linux-2.6.32.45/arch/x86/include/asm/vmi.h
11431--- linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11432+++ linux-2.6.32.45/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11433@@ -191,6 +191,7 @@ struct vrom_header {
11434 u8 reserved[96]; /* Reserved for headers */
11435 char vmi_init[8]; /* VMI_Init jump point */
11436 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11437+ char rom_data[8048]; /* rest of the option ROM */
11438 } __attribute__((packed));
11439
11440 struct pnp_header {
11441diff -urNp linux-2.6.32.45/arch/x86/include/asm/vmi_time.h linux-2.6.32.45/arch/x86/include/asm/vmi_time.h
11442--- linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11443+++ linux-2.6.32.45/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11444@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11445 int (*wallclock_updated)(void);
11446 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11447 void (*cancel_alarm)(u32 flags);
11448-} vmi_timer_ops;
11449+} __no_const vmi_timer_ops;
11450
11451 /* Prototypes */
11452 extern void __init vmi_time_init(void);
11453diff -urNp linux-2.6.32.45/arch/x86/include/asm/vsyscall.h linux-2.6.32.45/arch/x86/include/asm/vsyscall.h
11454--- linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11455+++ linux-2.6.32.45/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11456@@ -15,9 +15,10 @@ enum vsyscall_num {
11457
11458 #ifdef __KERNEL__
11459 #include <linux/seqlock.h>
11460+#include <linux/getcpu.h>
11461+#include <linux/time.h>
11462
11463 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11464-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11465
11466 /* Definitions for CONFIG_GENERIC_TIME definitions */
11467 #define __section_vsyscall_gtod_data __attribute__ \
11468@@ -31,7 +32,6 @@ enum vsyscall_num {
11469 #define VGETCPU_LSL 2
11470
11471 extern int __vgetcpu_mode;
11472-extern volatile unsigned long __jiffies;
11473
11474 /* kernel space (writeable) */
11475 extern int vgetcpu_mode;
11476@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11477
11478 extern void map_vsyscall(void);
11479
11480+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11481+extern time_t vtime(time_t *t);
11482+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11483 #endif /* __KERNEL__ */
11484
11485 #endif /* _ASM_X86_VSYSCALL_H */
11486diff -urNp linux-2.6.32.45/arch/x86/include/asm/x86_init.h linux-2.6.32.45/arch/x86/include/asm/x86_init.h
11487--- linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11488+++ linux-2.6.32.45/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11489@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11490 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11491 void (*find_smp_config)(unsigned int reserve);
11492 void (*get_smp_config)(unsigned int early);
11493-};
11494+} __no_const;
11495
11496 /**
11497 * struct x86_init_resources - platform specific resource related ops
11498@@ -42,7 +42,7 @@ struct x86_init_resources {
11499 void (*probe_roms)(void);
11500 void (*reserve_resources)(void);
11501 char *(*memory_setup)(void);
11502-};
11503+} __no_const;
11504
11505 /**
11506 * struct x86_init_irqs - platform specific interrupt setup
11507@@ -55,7 +55,7 @@ struct x86_init_irqs {
11508 void (*pre_vector_init)(void);
11509 void (*intr_init)(void);
11510 void (*trap_init)(void);
11511-};
11512+} __no_const;
11513
11514 /**
11515 * struct x86_init_oem - oem platform specific customizing functions
11516@@ -65,7 +65,7 @@ struct x86_init_irqs {
11517 struct x86_init_oem {
11518 void (*arch_setup)(void);
11519 void (*banner)(void);
11520-};
11521+} __no_const;
11522
11523 /**
11524 * struct x86_init_paging - platform specific paging functions
11525@@ -75,7 +75,7 @@ struct x86_init_oem {
11526 struct x86_init_paging {
11527 void (*pagetable_setup_start)(pgd_t *base);
11528 void (*pagetable_setup_done)(pgd_t *base);
11529-};
11530+} __no_const;
11531
11532 /**
11533 * struct x86_init_timers - platform specific timer setup
11534@@ -88,7 +88,7 @@ struct x86_init_timers {
11535 void (*setup_percpu_clockev)(void);
11536 void (*tsc_pre_init)(void);
11537 void (*timer_init)(void);
11538-};
11539+} __no_const;
11540
11541 /**
11542 * struct x86_init_ops - functions for platform specific setup
11543@@ -101,7 +101,7 @@ struct x86_init_ops {
11544 struct x86_init_oem oem;
11545 struct x86_init_paging paging;
11546 struct x86_init_timers timers;
11547-};
11548+} __no_const;
11549
11550 /**
11551 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11552@@ -109,7 +109,7 @@ struct x86_init_ops {
11553 */
11554 struct x86_cpuinit_ops {
11555 void (*setup_percpu_clockev)(void);
11556-};
11557+} __no_const;
11558
11559 /**
11560 * struct x86_platform_ops - platform specific runtime functions
11561@@ -121,7 +121,7 @@ struct x86_platform_ops {
11562 unsigned long (*calibrate_tsc)(void);
11563 unsigned long (*get_wallclock)(void);
11564 int (*set_wallclock)(unsigned long nowtime);
11565-};
11566+} __no_const;
11567
11568 extern struct x86_init_ops x86_init;
11569 extern struct x86_cpuinit_ops x86_cpuinit;
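
A number of the hunks above change ops structures in two opposite directions: tables that are only ever initialised statically gain const (amd_iommu_dma_ops, the mtrr ops, the sysfs_ops instances), while function-pointer structs that the kernel must still assign at runtime are tagged __no_const (the x86_init_* family, x86_cpuinit_ops, x86_platform_ops, vmi_timer_ops). This split is consistent with the constify gcc plugin referenced later in the patch (CONSTIFY_PLUGIN in the realmode Makefile), which by all appearances constifies function-pointer structs unless they are opted out. The compilable sketch below only illustrates that annotation split; the empty __no_const stub and the struct names are placeholders for the example, not the plugin's real definitions.

#include <stdio.h>

/* Stub: with the constify plugin this attribute would exempt a
 * function-pointer struct from automatic constification.  Here it
 * expands to nothing so the example builds on its own. */
#define __no_const

/* Left constified: instances are naturally declared const, as the
 * patch does for amd_iommu_dma_ops, amd_mtrr_ops and the sysfs_ops. */
struct fixed_ops {
	void (*report)(const char *msg);
};

/* Opted out: the kernel fills these callbacks in at runtime, so the
 * type has to stay writable. */
struct runtime_ops {
	void (*report)(const char *msg);
} __no_const;

static void hello(const char *msg) { printf("%s\n", msg); }

static const struct fixed_ops fops = { .report = hello };
static struct runtime_ops rops;		/* assigned later, as platform code would */

int main(void)
{
	rops.report = hello;		/* legal only because the type is writable */
	fops.report("constified ops");
	rops.report("runtime-assigned ops");
	return 0;
}
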
11570diff -urNp linux-2.6.32.45/arch/x86/include/asm/xsave.h linux-2.6.32.45/arch/x86/include/asm/xsave.h
11571--- linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11572+++ linux-2.6.32.45/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11573@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11574 static inline int xsave_user(struct xsave_struct __user *buf)
11575 {
11576 int err;
11577+
11578+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11579+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11580+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11581+#endif
11582+
11583 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11584 "2:\n"
11585 ".section .fixup,\"ax\"\n"
11586@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11587 u32 lmask = mask;
11588 u32 hmask = mask >> 32;
11589
11590+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11591+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11592+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11593+#endif
11594+
11595 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11596 "2:\n"
11597 ".section .fixup,\"ax\"\n"
11598diff -urNp linux-2.6.32.45/arch/x86/Kconfig linux-2.6.32.45/arch/x86/Kconfig
11599--- linux-2.6.32.45/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11600+++ linux-2.6.32.45/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11601@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11602
11603 config X86_32_LAZY_GS
11604 def_bool y
11605- depends on X86_32 && !CC_STACKPROTECTOR
11606+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11607
11608 config KTIME_SCALAR
11609 def_bool X86_32
11610@@ -1008,7 +1008,7 @@ choice
11611
11612 config NOHIGHMEM
11613 bool "off"
11614- depends on !X86_NUMAQ
11615+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11616 ---help---
11617 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11618 However, the address space of 32-bit x86 processors is only 4
11619@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11620
11621 config HIGHMEM4G
11622 bool "4GB"
11623- depends on !X86_NUMAQ
11624+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11625 ---help---
11626 Select this if you have a 32-bit processor and between 1 and 4
11627 gigabytes of physical RAM.
11628@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11629 hex
11630 default 0xB0000000 if VMSPLIT_3G_OPT
11631 default 0x80000000 if VMSPLIT_2G
11632- default 0x78000000 if VMSPLIT_2G_OPT
11633+ default 0x70000000 if VMSPLIT_2G_OPT
11634 default 0x40000000 if VMSPLIT_1G
11635 default 0xC0000000
11636 depends on X86_32
11637@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11638
11639 config EFI
11640 bool "EFI runtime service support"
11641- depends on ACPI
11642+ depends on ACPI && !PAX_KERNEXEC
11643 ---help---
11644 This enables the kernel to use EFI runtime services that are
11645 available (such as the EFI variable services).
11646@@ -1460,6 +1460,7 @@ config SECCOMP
11647
11648 config CC_STACKPROTECTOR
11649 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11650+ depends on X86_64 || !PAX_MEMORY_UDEREF
11651 ---help---
11652 This option turns on the -fstack-protector GCC feature. This
11653 feature puts, at the beginning of functions, a canary value on
11654@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11655 config PHYSICAL_START
11656 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11657 default "0x1000000"
11658+ range 0x400000 0x40000000
11659 ---help---
11660 This gives the physical address where the kernel is loaded.
11661
11662@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11663 hex
11664 prompt "Alignment value to which kernel should be aligned" if X86_32
11665 default "0x1000000"
11666+ range 0x400000 0x1000000 if PAX_KERNEXEC
11667 range 0x2000 0x1000000
11668 ---help---
11669 This value puts the alignment restrictions on physical address
11670@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11671 Say N if you want to disable CPU hotplug.
11672
11673 config COMPAT_VDSO
11674- def_bool y
11675+ def_bool n
11676 prompt "Compat VDSO support"
11677 depends on X86_32 || IA32_EMULATION
11678+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11679 ---help---
11680 Map the 32-bit VDSO to the predictable old-style address too.
11681 ---help---
11682diff -urNp linux-2.6.32.45/arch/x86/Kconfig.cpu linux-2.6.32.45/arch/x86/Kconfig.cpu
11683--- linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11684+++ linux-2.6.32.45/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11685@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11686
11687 config X86_F00F_BUG
11688 def_bool y
11689- depends on M586MMX || M586TSC || M586 || M486 || M386
11690+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11691
11692 config X86_WP_WORKS_OK
11693 def_bool y
11694@@ -360,7 +360,7 @@ config X86_POPAD_OK
11695
11696 config X86_ALIGNMENT_16
11697 def_bool y
11698- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11699+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11700
11701 config X86_INTEL_USERCOPY
11702 def_bool y
11703@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11704 # generates cmov.
11705 config X86_CMOV
11706 def_bool y
11707- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11708+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11709
11710 config X86_MINIMUM_CPU_FAMILY
11711 int
11712diff -urNp linux-2.6.32.45/arch/x86/Kconfig.debug linux-2.6.32.45/arch/x86/Kconfig.debug
11713--- linux-2.6.32.45/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11714+++ linux-2.6.32.45/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11715@@ -99,7 +99,7 @@ config X86_PTDUMP
11716 config DEBUG_RODATA
11717 bool "Write protect kernel read-only data structures"
11718 default y
11719- depends on DEBUG_KERNEL
11720+ depends on DEBUG_KERNEL && BROKEN
11721 ---help---
11722 Mark the kernel read-only data as write-protected in the pagetables,
11723 in order to catch accidental (and incorrect) writes to such const
11724diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile
11725--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11726+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11727@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11728 $(call cc-option, -fno-stack-protector) \
11729 $(call cc-option, -mpreferred-stack-boundary=2)
11730 KBUILD_CFLAGS += $(call cc-option, -m32)
11731+ifdef CONSTIFY_PLUGIN
11732+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11733+endif
11734 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11735 GCOV_PROFILE := n
11736
11737diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S
11738--- linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11739+++ linux-2.6.32.45/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11740@@ -91,6 +91,9 @@ _start:
11741 /* Do any other stuff... */
11742
11743 #ifndef CONFIG_64BIT
11744+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11745+ call verify_cpu
11746+
11747 /* This could also be done in C code... */
11748 movl pmode_cr3, %eax
11749 movl %eax, %cr3
11750@@ -104,7 +107,7 @@ _start:
11751 movl %eax, %ecx
11752 orl %edx, %ecx
11753 jz 1f
11754- movl $0xc0000080, %ecx
11755+ mov $MSR_EFER, %ecx
11756 wrmsr
11757 1:
11758
11759@@ -114,6 +117,7 @@ _start:
11760 movl pmode_cr0, %eax
11761 movl %eax, %cr0
11762 jmp pmode_return
11763+# include "../../verify_cpu.S"
11764 #else
11765 pushw $0
11766 pushw trampoline_segment
11767diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c
11768--- linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11769+++ linux-2.6.32.45/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11770@@ -11,11 +11,12 @@
11771 #include <linux/cpumask.h>
11772 #include <asm/segment.h>
11773 #include <asm/desc.h>
11774+#include <asm/e820.h>
11775
11776 #include "realmode/wakeup.h"
11777 #include "sleep.h"
11778
11779-unsigned long acpi_wakeup_address;
11780+unsigned long acpi_wakeup_address = 0x2000;
11781 unsigned long acpi_realmode_flags;
11782
11783 /* address in low memory of the wakeup routine. */
11784@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11785 #else /* CONFIG_64BIT */
11786 header->trampoline_segment = setup_trampoline() >> 4;
11787 #ifdef CONFIG_SMP
11788- stack_start.sp = temp_stack + sizeof(temp_stack);
11789+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11790+
11791+ pax_open_kernel();
11792 early_gdt_descr.address =
11793 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11794+ pax_close_kernel();
11795+
11796 initial_gs = per_cpu_offset(smp_processor_id());
11797 #endif
11798 initial_code = (unsigned long)wakeup_long64;
11799@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11800 return;
11801 }
11802
11803- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11804-
11805- if (!acpi_realmode) {
11806- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11807- return;
11808- }
11809-
11810- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11811+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11812+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11813 }
11814
11815
11816diff -urNp linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S
11817--- linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11818+++ linux-2.6.32.45/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11819@@ -30,13 +30,11 @@ wakeup_pmode_return:
11820 # and restore the stack ... but you need gdt for this to work
11821 movl saved_context_esp, %esp
11822
11823- movl %cs:saved_magic, %eax
11824- cmpl $0x12345678, %eax
11825+ cmpl $0x12345678, saved_magic
11826 jne bogus_magic
11827
11828 # jump to place where we left off
11829- movl saved_eip, %eax
11830- jmp *%eax
11831+ jmp *(saved_eip)
11832
11833 bogus_magic:
11834 jmp bogus_magic
11835diff -urNp linux-2.6.32.45/arch/x86/kernel/alternative.c linux-2.6.32.45/arch/x86/kernel/alternative.c
11836--- linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11837+++ linux-2.6.32.45/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11838@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11839
11840 BUG_ON(p->len > MAX_PATCH_LEN);
11841 /* prep the buffer with the original instructions */
11842- memcpy(insnbuf, p->instr, p->len);
11843+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11844 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11845 (unsigned long)p->instr, p->len);
11846
11847@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11848 if (smp_alt_once)
11849 free_init_pages("SMP alternatives",
11850 (unsigned long)__smp_locks,
11851- (unsigned long)__smp_locks_end);
11852+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11853
11854 restart_nmi();
11855 }
11856@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11857 * instructions. And on the local CPU you need to be protected again NMI or MCE
11858 * handlers seeing an inconsistent instruction while you patch.
11859 */
11860-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11861+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11862 size_t len)
11863 {
11864 unsigned long flags;
11865 local_irq_save(flags);
11866- memcpy(addr, opcode, len);
11867+
11868+ pax_open_kernel();
11869+ memcpy(ktla_ktva(addr), opcode, len);
11870 sync_core();
11871+ pax_close_kernel();
11872+
11873 local_irq_restore(flags);
11874 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11875 that causes hangs on some VIA CPUs. */
11876@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11877 */
11878 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11879 {
11880- unsigned long flags;
11881- char *vaddr;
11882+ unsigned char *vaddr = ktla_ktva(addr);
11883 struct page *pages[2];
11884- int i;
11885+ size_t i;
11886
11887 if (!core_kernel_text((unsigned long)addr)) {
11888- pages[0] = vmalloc_to_page(addr);
11889- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11890+ pages[0] = vmalloc_to_page(vaddr);
11891+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11892 } else {
11893- pages[0] = virt_to_page(addr);
11894+ pages[0] = virt_to_page(vaddr);
11895 WARN_ON(!PageReserved(pages[0]));
11896- pages[1] = virt_to_page(addr + PAGE_SIZE);
11897+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11898 }
11899 BUG_ON(!pages[0]);
11900- local_irq_save(flags);
11901- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11902- if (pages[1])
11903- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11904- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11905- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11906- clear_fixmap(FIX_TEXT_POKE0);
11907- if (pages[1])
11908- clear_fixmap(FIX_TEXT_POKE1);
11909- local_flush_tlb();
11910- sync_core();
11911- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11912- that causes hangs on some VIA CPUs. */
11913+ text_poke_early(addr, opcode, len);
11914 for (i = 0; i < len; i++)
11915- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11916- local_irq_restore(flags);
11917+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11918 return addr;
11919 }
11920diff -urNp linux-2.6.32.45/arch/x86/kernel/amd_iommu.c linux-2.6.32.45/arch/x86/kernel/amd_iommu.c
11921--- linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11922+++ linux-2.6.32.45/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11923@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11924 }
11925 }
11926
11927-static struct dma_map_ops amd_iommu_dma_ops = {
11928+static const struct dma_map_ops amd_iommu_dma_ops = {
11929 .alloc_coherent = alloc_coherent,
11930 .free_coherent = free_coherent,
11931 .map_page = map_page,
11932diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/apic.c linux-2.6.32.45/arch/x86/kernel/apic/apic.c
11933--- linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11934+++ linux-2.6.32.45/arch/x86/kernel/apic/apic.c 2011-08-17 20:00:16.000000000 -0400
11935@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
11936 /*
11937 * Debug level, exported for io_apic.c
11938 */
11939-unsigned int apic_verbosity;
11940+int apic_verbosity;
11941
11942 int pic_mode;
11943
11944@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11945 apic_write(APIC_ESR, 0);
11946 v1 = apic_read(APIC_ESR);
11947 ack_APIC_irq();
11948- atomic_inc(&irq_err_count);
11949+ atomic_inc_unchecked(&irq_err_count);
11950
11951 /*
11952 * Here is what the APIC error bits mean:
11953@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11954 u16 *bios_cpu_apicid;
11955 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11956
11957+ pax_track_stack();
11958+
11959 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11960 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11961
11962diff -urNp linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c
11963--- linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11964+++ linux-2.6.32.45/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11965@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11966 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11967 GFP_ATOMIC);
11968 if (!ioapic_entries)
11969- return 0;
11970+ return NULL;
11971
11972 for (apic = 0; apic < nr_ioapics; apic++) {
11973 ioapic_entries[apic] =
11974@@ -733,7 +733,7 @@ nomem:
11975 kfree(ioapic_entries[apic]);
11976 kfree(ioapic_entries);
11977
11978- return 0;
11979+ return NULL;
11980 }
11981
11982 /*
11983@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11984 }
11985 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11986
11987-void lock_vector_lock(void)
11988+void lock_vector_lock(void) __acquires(vector_lock)
11989 {
11990 /* Used to the online set of cpus does not change
11991 * during assign_irq_vector.
11992@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11993 spin_lock(&vector_lock);
11994 }
11995
11996-void unlock_vector_lock(void)
11997+void unlock_vector_lock(void) __releases(vector_lock)
11998 {
11999 spin_unlock(&vector_lock);
12000 }
12001@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
12002 ack_APIC_irq();
12003 }
12004
12005-atomic_t irq_mis_count;
12006+atomic_unchecked_t irq_mis_count;
12007
12008 static void ack_apic_level(unsigned int irq)
12009 {
12010@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
12011
12012 /* Tail end of version 0x11 I/O APIC bug workaround */
12013 if (!(v & (1 << (i & 0x1f)))) {
12014- atomic_inc(&irq_mis_count);
12015+ atomic_inc_unchecked(&irq_mis_count);
12016 spin_lock(&ioapic_lock);
12017 __mask_and_edge_IO_APIC_irq(cfg);
12018 __unmask_and_level_IO_APIC_irq(cfg);
12019diff -urNp linux-2.6.32.45/arch/x86/kernel/apm_32.c linux-2.6.32.45/arch/x86/kernel/apm_32.c
12020--- linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
12021+++ linux-2.6.32.45/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
12022@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
12023 * This is for buggy BIOS's that refer to (real mode) segment 0x40
12024 * even though they are called in protected mode.
12025 */
12026-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
12027+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
12028 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
12029
12030 static const char driver_version[] = "1.16ac"; /* no spaces */
12031@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
12032 BUG_ON(cpu != 0);
12033 gdt = get_cpu_gdt_table(cpu);
12034 save_desc_40 = gdt[0x40 / 8];
12035+
12036+ pax_open_kernel();
12037 gdt[0x40 / 8] = bad_bios_desc;
12038+ pax_close_kernel();
12039
12040 apm_irq_save(flags);
12041 APM_DO_SAVE_SEGS;
12042@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
12043 &call->esi);
12044 APM_DO_RESTORE_SEGS;
12045 apm_irq_restore(flags);
12046+
12047+ pax_open_kernel();
12048 gdt[0x40 / 8] = save_desc_40;
12049+ pax_close_kernel();
12050+
12051 put_cpu();
12052
12053 return call->eax & 0xff;
12054@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
12055 BUG_ON(cpu != 0);
12056 gdt = get_cpu_gdt_table(cpu);
12057 save_desc_40 = gdt[0x40 / 8];
12058+
12059+ pax_open_kernel();
12060 gdt[0x40 / 8] = bad_bios_desc;
12061+ pax_close_kernel();
12062
12063 apm_irq_save(flags);
12064 APM_DO_SAVE_SEGS;
12065@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
12066 &call->eax);
12067 APM_DO_RESTORE_SEGS;
12068 apm_irq_restore(flags);
12069+
12070+ pax_open_kernel();
12071 gdt[0x40 / 8] = save_desc_40;
12072+ pax_close_kernel();
12073+
12074 put_cpu();
12075 return error;
12076 }
12077@@ -975,7 +989,7 @@ recalc:
12078
12079 static void apm_power_off(void)
12080 {
12081- unsigned char po_bios_call[] = {
12082+ const unsigned char po_bios_call[] = {
12083 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
12084 0x8e, 0xd0, /* movw ax,ss */
12085 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
12086@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
12087 * code to that CPU.
12088 */
12089 gdt = get_cpu_gdt_table(0);
12090+
12091+ pax_open_kernel();
12092 set_desc_base(&gdt[APM_CS >> 3],
12093 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
12094 set_desc_base(&gdt[APM_CS_16 >> 3],
12095 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
12096 set_desc_base(&gdt[APM_DS >> 3],
12097 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
12098+ pax_close_kernel();
12099
12100 proc_create("apm", 0, NULL, &apm_file_ops);
12101
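
The apm_32.c changes show the write-bracketing idiom that recurs through the patch (also visible above in acpi/sleep.c around early_gdt_descr and in mce-inject.c around mce_chrdev_ops.write): data such as the per-cpu GDT is treated as read-only under PAX_KERNEXEC, so every legitimate runtime update is wrapped in pax_open_kernel()/pax_close_kernel(). The fragment below compiles on its own with stubbed brackets purely to show the shape of that idiom; the stubs, the struct desc type and the fixed slot index are invented for the example and do not reproduce the real write-protection toggling.

#include <stdio.h>

/* Stand-in brackets: in the patch these temporarily allow writes to the
 * otherwise read-only kernel data and then re-protect it. */
static void pax_open_kernel(void)  { puts("(unprotect kernel data)"); }
static void pax_close_kernel(void) { puts("(reprotect kernel data)"); }

struct desc { unsigned long base, limit; };
static struct desc gdt[16];		/* model of the per-cpu GDT */

/* Mirrors the patched __apm_bios_call(): save the descriptor, install
 * the BIOS workaround entry inside an open/close bracket, call out,
 * then restore the saved entry inside another bracket. */
static void patched_bios_call(struct desc workaround)
{
	struct desc saved = gdt[8];

	pax_open_kernel();
	gdt[8] = workaround;
	pax_close_kernel();

	/* ... the BIOS call would happen here ... */

	pax_open_kernel();
	gdt[8] = saved;
	pax_close_kernel();
}

int main(void)
{
	struct desc bad_bios = { 0x400UL, 0xbffUL };

	patched_bios_call(bad_bios);
	return 0;
}
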
12102diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c
12103--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
12104+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
12105@@ -51,7 +51,6 @@ void foo(void)
12106 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
12107 BLANK();
12108
12109- OFFSET(TI_task, thread_info, task);
12110 OFFSET(TI_exec_domain, thread_info, exec_domain);
12111 OFFSET(TI_flags, thread_info, flags);
12112 OFFSET(TI_status, thread_info, status);
12113@@ -60,6 +59,8 @@ void foo(void)
12114 OFFSET(TI_restart_block, thread_info, restart_block);
12115 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
12116 OFFSET(TI_cpu, thread_info, cpu);
12117+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
12118+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12119 BLANK();
12120
12121 OFFSET(GDS_size, desc_ptr, size);
12122@@ -99,6 +100,7 @@ void foo(void)
12123
12124 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12125 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12126+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12127 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12128 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12129 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12130@@ -115,6 +117,11 @@ void foo(void)
12131 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12132 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12133 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12134+
12135+#ifdef CONFIG_PAX_KERNEXEC
12136+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12137+#endif
12138+
12139 #endif
12140
12141 #ifdef CONFIG_XEN
12142diff -urNp linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c
12143--- linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12144+++ linux-2.6.32.45/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
12145@@ -44,6 +44,8 @@ int main(void)
12146 ENTRY(addr_limit);
12147 ENTRY(preempt_count);
12148 ENTRY(status);
12149+ ENTRY(lowest_stack);
12150+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12151 #ifdef CONFIG_IA32_EMULATION
12152 ENTRY(sysenter_return);
12153 #endif
12154@@ -63,6 +65,18 @@ int main(void)
12155 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12156 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12157 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12158+
12159+#ifdef CONFIG_PAX_KERNEXEC
12160+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12161+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12162+#endif
12163+
12164+#ifdef CONFIG_PAX_MEMORY_UDEREF
12165+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12166+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12167+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
12168+#endif
12169+
12170 #endif
12171
12172
12173@@ -115,6 +129,7 @@ int main(void)
12174 ENTRY(cr8);
12175 BLANK();
12176 #undef ENTRY
12177+ DEFINE(TSS_size, sizeof(struct tss_struct));
12178 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12179 BLANK();
12180 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12181@@ -130,6 +145,7 @@ int main(void)
12182
12183 BLANK();
12184 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12185+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12186 #ifdef CONFIG_XEN
12187 BLANK();
12188 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12189diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/amd.c
12190--- linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12191+++ linux-2.6.32.45/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12192@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12193 unsigned int size)
12194 {
12195 /* AMD errata T13 (order #21922) */
12196- if ((c->x86 == 6)) {
12197+ if (c->x86 == 6) {
12198 /* Duron Rev A0 */
12199 if (c->x86_model == 3 && c->x86_mask == 0)
12200 size = 64;
12201diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/common.c linux-2.6.32.45/arch/x86/kernel/cpu/common.c
12202--- linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12203+++ linux-2.6.32.45/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12204@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12205
12206 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12207
12208-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12209-#ifdef CONFIG_X86_64
12210- /*
12211- * We need valid kernel segments for data and code in long mode too
12212- * IRET will check the segment types kkeil 2000/10/28
12213- * Also sysret mandates a special GDT layout
12214- *
12215- * TLS descriptors are currently at a different place compared to i386.
12216- * Hopefully nobody expects them at a fixed place (Wine?)
12217- */
12218- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12219- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12220- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12221- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12222- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12223- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12224-#else
12225- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12226- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12227- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12228- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12229- /*
12230- * Segments used for calling PnP BIOS have byte granularity.
12231- * They code segments and data segments have fixed 64k limits,
12232- * the transfer segment sizes are set at run time.
12233- */
12234- /* 32-bit code */
12235- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12236- /* 16-bit code */
12237- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12238- /* 16-bit data */
12239- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12240- /* 16-bit data */
12241- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12242- /* 16-bit data */
12243- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12244- /*
12245- * The APM segments have byte granularity and their bases
12246- * are set at run time. All have 64k limits.
12247- */
12248- /* 32-bit code */
12249- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12250- /* 16-bit code */
12251- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12252- /* data */
12253- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12254-
12255- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12256- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12257- GDT_STACK_CANARY_INIT
12258-#endif
12259-} };
12260-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12261-
12262 static int __init x86_xsave_setup(char *s)
12263 {
12264 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12265@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12266 {
12267 struct desc_ptr gdt_descr;
12268
12269- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12270+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12271 gdt_descr.size = GDT_SIZE - 1;
12272 load_gdt(&gdt_descr);
12273 /* Reload the per-cpu base */
12274@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12275 /* Filter out anything that depends on CPUID levels we don't have */
12276 filter_cpuid_features(c, true);
12277
12278+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12279+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12280+#endif
12281+
12282 /* If the model name is still unset, do table lookup. */
12283 if (!c->x86_model_id[0]) {
12284 const char *p;
12285@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12286 }
12287 __setup("clearcpuid=", setup_disablecpuid);
12288
12289+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12290+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12291+
12292 #ifdef CONFIG_X86_64
12293 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12294
12295@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12296 EXPORT_PER_CPU_SYMBOL(current_task);
12297
12298 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12299- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12300+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12301 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12302
12303 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12304@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12305 {
12306 memset(regs, 0, sizeof(struct pt_regs));
12307 regs->fs = __KERNEL_PERCPU;
12308- regs->gs = __KERNEL_STACK_CANARY;
12309+ savesegment(gs, regs->gs);
12310
12311 return regs;
12312 }
12313@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12314 int i;
12315
12316 cpu = stack_smp_processor_id();
12317- t = &per_cpu(init_tss, cpu);
12318+ t = init_tss + cpu;
12319 orig_ist = &per_cpu(orig_ist, cpu);
12320
12321 #ifdef CONFIG_NUMA
12322@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12323 switch_to_new_gdt(cpu);
12324 loadsegment(fs, 0);
12325
12326- load_idt((const struct desc_ptr *)&idt_descr);
12327+ load_idt(&idt_descr);
12328
12329 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12330 syscall_init();
12331@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12332 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12333 barrier();
12334
12335- check_efer();
12336 if (cpu != 0)
12337 enable_x2apic();
12338
12339@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12340 {
12341 int cpu = smp_processor_id();
12342 struct task_struct *curr = current;
12343- struct tss_struct *t = &per_cpu(init_tss, cpu);
12344+ struct tss_struct *t = init_tss + cpu;
12345 struct thread_struct *thread = &curr->thread;
12346
12347 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12348diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel.c linux-2.6.32.45/arch/x86/kernel/cpu/intel.c
12349--- linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12350+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12351@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12352 * Update the IDT descriptor and reload the IDT so that
12353 * it uses the read-only mapped virtual address.
12354 */
12355- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12356+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12357 load_idt(&idt_descr);
12358 }
12359 #endif
12360diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c
12361--- linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12362+++ linux-2.6.32.45/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12363@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12364 return ret;
12365 }
12366
12367-static struct sysfs_ops sysfs_ops = {
12368+static const struct sysfs_ops sysfs_ops = {
12369 .show = show,
12370 .store = store,
12371 };
12372diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/Makefile linux-2.6.32.45/arch/x86/kernel/cpu/Makefile
12373--- linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12374+++ linux-2.6.32.45/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12375@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12376 CFLAGS_REMOVE_common.o = -pg
12377 endif
12378
12379-# Make sure load_percpu_segment has no stackprotector
12380-nostackp := $(call cc-option, -fno-stack-protector)
12381-CFLAGS_common.o := $(nostackp)
12382-
12383 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12384 obj-y += proc.o capflags.o powerflags.o common.o
12385 obj-y += vmware.o hypervisor.o sched.o
12386diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c
12387--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12388+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12389@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12390 return ret;
12391 }
12392
12393-static struct sysfs_ops threshold_ops = {
12394+static const struct sysfs_ops threshold_ops = {
12395 .show = show,
12396 .store = store,
12397 };
12398diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c
12399--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12400+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12401@@ -43,6 +43,7 @@
12402 #include <asm/ipi.h>
12403 #include <asm/mce.h>
12404 #include <asm/msr.h>
12405+#include <asm/local.h>
12406
12407 #include "mce-internal.h"
12408
12409@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12410 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12411 m->cs, m->ip);
12412
12413- if (m->cs == __KERNEL_CS)
12414+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12415 print_symbol("{%s}", m->ip);
12416 pr_cont("\n");
12417 }
12418@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12419
12420 #define PANIC_TIMEOUT 5 /* 5 seconds */
12421
12422-static atomic_t mce_paniced;
12423+static atomic_unchecked_t mce_paniced;
12424
12425 static int fake_panic;
12426-static atomic_t mce_fake_paniced;
12427+static atomic_unchecked_t mce_fake_paniced;
12428
12429 /* Panic in progress. Enable interrupts and wait for final IPI */
12430 static void wait_for_panic(void)
12431@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12432 /*
12433 * Make sure only one CPU runs in machine check panic
12434 */
12435- if (atomic_inc_return(&mce_paniced) > 1)
12436+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12437 wait_for_panic();
12438 barrier();
12439
12440@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12441 console_verbose();
12442 } else {
12443 /* Don't log too much for fake panic */
12444- if (atomic_inc_return(&mce_fake_paniced) > 1)
12445+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12446 return;
12447 }
12448 print_mce_head();
12449@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12450 * might have been modified by someone else.
12451 */
12452 rmb();
12453- if (atomic_read(&mce_paniced))
12454+ if (atomic_read_unchecked(&mce_paniced))
12455 wait_for_panic();
12456 if (!monarch_timeout)
12457 goto out;
12458@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12459 */
12460
12461 static DEFINE_SPINLOCK(mce_state_lock);
12462-static int open_count; /* #times opened */
12463+static local_t open_count; /* #times opened */
12464 static int open_exclu; /* already open exclusive? */
12465
12466 static int mce_open(struct inode *inode, struct file *file)
12467 {
12468 spin_lock(&mce_state_lock);
12469
12470- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12471+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12472 spin_unlock(&mce_state_lock);
12473
12474 return -EBUSY;
12475@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12476
12477 if (file->f_flags & O_EXCL)
12478 open_exclu = 1;
12479- open_count++;
12480+ local_inc(&open_count);
12481
12482 spin_unlock(&mce_state_lock);
12483
12484@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12485 {
12486 spin_lock(&mce_state_lock);
12487
12488- open_count--;
12489+ local_dec(&open_count);
12490 open_exclu = 0;
12491
12492 spin_unlock(&mce_state_lock);
12493@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12494 static void mce_reset(void)
12495 {
12496 cpu_missing = 0;
12497- atomic_set(&mce_fake_paniced, 0);
12498+ atomic_set_unchecked(&mce_fake_paniced, 0);
12499 atomic_set(&mce_executing, 0);
12500 atomic_set(&mce_callin, 0);
12501 atomic_set(&global_nwo, 0);
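
The mce.c hunks above convert the panic bookkeeping (mce_paniced, mce_fake_paniced) to atomic_unchecked_t, the variant this patch uses for counters that should not trip PaX's reference-count overflow detection, and track the /dev/mcelog open count in a local_t; the surrounding logic is unchanged. A rough userspace sketch of the panic-once gate, using C11 atomics in place of atomic_inc_return_unchecked():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int panic_count;

static void machine_check_panic(const char *msg)
{
        /* only the first caller actually panics; later CPUs just wait */
        if (atomic_fetch_add(&panic_count, 1) + 1 > 1) {
                puts("panic already in progress, waiting");
                return;
        }
        printf("PANIC: %s\n", msg);
}

int main(void)
{
        machine_check_panic("fatal machine check");
        machine_check_panic("fatal machine check");   /* second call is a no-op */
        return 0;
}
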
12502diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c
12503--- linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12504+++ linux-2.6.32.45/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12505@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12506 static int inject_init(void)
12507 {
12508 printk(KERN_INFO "Machine check injector initialized\n");
12509- mce_chrdev_ops.write = mce_write;
12510+ pax_open_kernel();
12511+ *(void **)&mce_chrdev_ops.write = mce_write;
12512+ pax_close_kernel();
12513 register_die_notifier(&mce_raise_nb);
12514 return 0;
12515 }
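
mce-inject.c previously assigned mce_chrdev_ops.write directly; with the file_operations structure made read-only elsewhere in this patch, the assignment is now bracketed by pax_open_kernel()/pax_close_kernel(), which briefly permit the write. A userspace analogue of that open/patch/close window using mprotect() (table, names, and layout are illustrative only):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef long (*write_fn)(const char *buf, unsigned long len);

static long default_write(const char *buf, unsigned long len)
{
        (void)buf;
        return (long)len;
}

static long inject_write(const char *buf, unsigned long len)
{
        (void)buf;
        printf("injected %lu bytes\n", len);
        return (long)len;
}

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        /* give the ops table its own page so its protection can be toggled */
        write_fn *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
                return 1;

        ops[0] = default_write;
        mprotect(ops, pg, PROT_READ);                 /* normally read-only  */

        mprotect(ops, pg, PROT_READ | PROT_WRITE);    /* pax_open_kernel()   */
        ops[0] = inject_write;                        /* late registration   */
        mprotect(ops, pg, PROT_READ);                 /* pax_close_kernel()  */

        return ops[0]("hello", 5) == 5 ? 0 : 1;
}
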
12516diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c
12517--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12518+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12519@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12520 return 0;
12521 }
12522
12523-static struct mtrr_ops amd_mtrr_ops = {
12524+static const struct mtrr_ops amd_mtrr_ops = {
12525 .vendor = X86_VENDOR_AMD,
12526 .set = amd_set_mtrr,
12527 .get = amd_get_mtrr,
12528diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c
12529--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12530+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12531@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12532 return 0;
12533 }
12534
12535-static struct mtrr_ops centaur_mtrr_ops = {
12536+static const struct mtrr_ops centaur_mtrr_ops = {
12537 .vendor = X86_VENDOR_CENTAUR,
12538 .set = centaur_set_mcr,
12539 .get = centaur_get_mcr,
12540diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c
12541--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12542+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12543@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12544 post_set();
12545 }
12546
12547-static struct mtrr_ops cyrix_mtrr_ops = {
12548+static const struct mtrr_ops cyrix_mtrr_ops = {
12549 .vendor = X86_VENDOR_CYRIX,
12550 .set_all = cyrix_set_all,
12551 .set = cyrix_set_arr,
12552diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c
12553--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12554+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12555@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12556 /*
12557 * Generic structure...
12558 */
12559-struct mtrr_ops generic_mtrr_ops = {
12560+const struct mtrr_ops generic_mtrr_ops = {
12561 .use_intel_if = 1,
12562 .set_all = generic_set_all,
12563 .get = generic_get_mtrr,
12564diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c
12565--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12566+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12567@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12568 u64 size_or_mask, size_and_mask;
12569 static bool mtrr_aps_delayed_init;
12570
12571-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12572+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12573
12574-struct mtrr_ops *mtrr_if;
12575+const struct mtrr_ops *mtrr_if;
12576
12577 static void set_mtrr(unsigned int reg, unsigned long base,
12578 unsigned long size, mtrr_type type);
12579
12580-void set_mtrr_ops(struct mtrr_ops *ops)
12581+void set_mtrr_ops(const struct mtrr_ops *ops)
12582 {
12583 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12584 mtrr_ops[ops->vendor] = ops;
12585diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h
12586--- linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12587+++ linux-2.6.32.45/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12588@@ -12,19 +12,19 @@
12589 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12590
12591 struct mtrr_ops {
12592- u32 vendor;
12593- u32 use_intel_if;
12594- void (*set)(unsigned int reg, unsigned long base,
12595+ const u32 vendor;
12596+ const u32 use_intel_if;
12597+ void (* const set)(unsigned int reg, unsigned long base,
12598 unsigned long size, mtrr_type type);
12599- void (*set_all)(void);
12600+ void (* const set_all)(void);
12601
12602- void (*get)(unsigned int reg, unsigned long *base,
12603+ void (* const get)(unsigned int reg, unsigned long *base,
12604 unsigned long *size, mtrr_type *type);
12605- int (*get_free_region)(unsigned long base, unsigned long size,
12606+ int (* const get_free_region)(unsigned long base, unsigned long size,
12607 int replace_reg);
12608- int (*validate_add_page)(unsigned long base, unsigned long size,
12609+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12610 unsigned int type);
12611- int (*have_wrcomb)(void);
12612+ int (* const have_wrcomb)(void);
12613 };
12614
12615 extern int generic_get_free_region(unsigned long base, unsigned long size,
12616@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12617 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12618 unsigned int type);
12619
12620-extern struct mtrr_ops generic_mtrr_ops;
12621+extern const struct mtrr_ops generic_mtrr_ops;
12622
12623 extern int positive_have_wrcomb(void);
12624
12625@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12626 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12627 void get_mtrr_state(void);
12628
12629-extern void set_mtrr_ops(struct mtrr_ops *ops);
12630+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12631
12632 extern u64 size_or_mask, size_and_mask;
12633-extern struct mtrr_ops *mtrr_if;
12634+extern const struct mtrr_ops *mtrr_if;
12635
12636 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12637 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
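
The mtrr hunks above constify both the vendor mtrr_ops instances and, in mtrr.h, the individual callback members, so the dispatch table cannot be retargeted even through a non-const pointer to the structure. A minimal sketch of const function-pointer members (the struct and names are illustrative):

#include <stdio.h>

struct ops {
        void (* const set)(int val);
        int  (* const get)(void);
};

static void my_set(int val) { printf("set %d\n", val); }
static int  my_get(void)    { return 42; }

static struct ops vendor_ops = { .set = my_set, .get = my_get };

int main(void)
{
        vendor_ops.set(vendor_ops.get());
        /* vendor_ops.set = my_set;  <- would not compile: member is const */
        return 0;
}
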
12638diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c
12639--- linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12640+++ linux-2.6.32.45/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12641@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12642
12643 /* Interface defining a CPU specific perfctr watchdog */
12644 struct wd_ops {
12645- int (*reserve)(void);
12646- void (*unreserve)(void);
12647- int (*setup)(unsigned nmi_hz);
12648- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12649- void (*stop)(void);
12650+ int (* const reserve)(void);
12651+ void (* const unreserve)(void);
12652+ int (* const setup)(unsigned nmi_hz);
12653+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12654+ void (* const stop)(void);
12655 unsigned perfctr;
12656 unsigned evntsel;
12657 u64 checkbit;
12658@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12659 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12660 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12661
12662+/* cannot be const */
12663 static struct wd_ops intel_arch_wd_ops;
12664
12665 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12666@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12667 return 1;
12668 }
12669
12670+/* cannot be const */
12671 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12672 .reserve = single_msr_reserve,
12673 .unreserve = single_msr_unreserve,
12674diff -urNp linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c
12675--- linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12676+++ linux-2.6.32.45/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12677@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12678 * count to the generic event atomically:
12679 */
12680 again:
12681- prev_raw_count = atomic64_read(&hwc->prev_count);
12682+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12683 rdmsrl(hwc->event_base + idx, new_raw_count);
12684
12685- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12686+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12687 new_raw_count) != prev_raw_count)
12688 goto again;
12689
12690@@ -741,7 +741,7 @@ again:
12691 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12692 delta >>= shift;
12693
12694- atomic64_add(delta, &event->count);
12695+ atomic64_add_unchecked(delta, &event->count);
12696 atomic64_sub(delta, &hwc->period_left);
12697
12698 return new_raw_count;
12699@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12700 * The hw event starts counting from this event offset,
12701 * mark it to be able to extra future deltas:
12702 */
12703- atomic64_set(&hwc->prev_count, (u64)-left);
12704+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12705
12706 err = checking_wrmsrl(hwc->event_base + idx,
12707 (u64)(-left) & x86_pmu.event_mask);
12708@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12709 break;
12710
12711 callchain_store(entry, frame.return_address);
12712- fp = frame.next_frame;
12713+ fp = (__force const void __user *)frame.next_frame;
12714 }
12715 }
12716
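
The perf_event.c hunks switch the hardware-counter bookkeeping (hwc->prev_count, event->count) to the _unchecked atomic64 helpers; the read, compare-exchange, accumulate loop is otherwise untouched. A userspace sketch of that loop with C11 atomics (the kernel additionally re-reads the hardware counter MSR on every retry, which is omitted here):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t event_count;

static uint64_t update_event(uint64_t new_raw_count)
{
        uint64_t prev = atomic_load(&prev_count);

        /* retry if another context updated prev_count underneath us */
        while (!atomic_compare_exchange_weak(&prev_count, &prev, new_raw_count))
                ;

        atomic_fetch_add(&event_count, new_raw_count - prev);
        return new_raw_count;
}

int main(void)
{
        update_event(100);
        update_event(250);
        printf("accumulated: %llu\n",
               (unsigned long long)atomic_load(&event_count));
        return 0;
}
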
12717diff -urNp linux-2.6.32.45/arch/x86/kernel/crash.c linux-2.6.32.45/arch/x86/kernel/crash.c
12718--- linux-2.6.32.45/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12719+++ linux-2.6.32.45/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12720@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12721 regs = args->regs;
12722
12723 #ifdef CONFIG_X86_32
12724- if (!user_mode_vm(regs)) {
12725+ if (!user_mode(regs)) {
12726 crash_fixup_ss_esp(&fixed_regs, regs);
12727 regs = &fixed_regs;
12728 }
12729diff -urNp linux-2.6.32.45/arch/x86/kernel/doublefault_32.c linux-2.6.32.45/arch/x86/kernel/doublefault_32.c
12730--- linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12731+++ linux-2.6.32.45/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12732@@ -11,7 +11,7 @@
12733
12734 #define DOUBLEFAULT_STACKSIZE (1024)
12735 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12736-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12737+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12738
12739 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12740
12741@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12742 unsigned long gdt, tss;
12743
12744 store_gdt(&gdt_desc);
12745- gdt = gdt_desc.address;
12746+ gdt = (unsigned long)gdt_desc.address;
12747
12748 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12749
12750@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12751 /* 0x2 bit is always set */
12752 .flags = X86_EFLAGS_SF | 0x2,
12753 .sp = STACK_START,
12754- .es = __USER_DS,
12755+ .es = __KERNEL_DS,
12756 .cs = __KERNEL_CS,
12757 .ss = __KERNEL_DS,
12758- .ds = __USER_DS,
12759+ .ds = __KERNEL_DS,
12760 .fs = __KERNEL_PERCPU,
12761
12762 .__cr3 = __pa_nodebug(swapper_pg_dir),
12763diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c
12764--- linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12765+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12766@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12767 #endif
12768
12769 for (;;) {
12770- struct thread_info *context;
12771+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12772+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12773
12774- context = (struct thread_info *)
12775- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12776- bp = print_context_stack(context, stack, bp, ops,
12777- data, NULL, &graph);
12778-
12779- stack = (unsigned long *)context->previous_esp;
12780- if (!stack)
12781+ if (stack_start == task_stack_page(task))
12782 break;
12783+ stack = *(unsigned long **)stack_start;
12784 if (ops->stack(data, "IRQ") < 0)
12785 break;
12786 touch_nmi_watchdog();
12787@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12788 * When in-kernel, we also print out the stack and code at the
12789 * time of the fault..
12790 */
12791- if (!user_mode_vm(regs)) {
12792+ if (!user_mode(regs)) {
12793 unsigned int code_prologue = code_bytes * 43 / 64;
12794 unsigned int code_len = code_bytes;
12795 unsigned char c;
12796 u8 *ip;
12797+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12798
12799 printk(KERN_EMERG "Stack:\n");
12800 show_stack_log_lvl(NULL, regs, &regs->sp,
12801@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12802
12803 printk(KERN_EMERG "Code: ");
12804
12805- ip = (u8 *)regs->ip - code_prologue;
12806+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12807 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12808 /* try starting at IP */
12809- ip = (u8 *)regs->ip;
12810+ ip = (u8 *)regs->ip + cs_base;
12811 code_len = code_len - code_prologue + 1;
12812 }
12813 for (i = 0; i < code_len; i++, ip++) {
12814@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12815 printk(" Bad EIP value.");
12816 break;
12817 }
12818- if (ip == (u8 *)regs->ip)
12819+ if (ip == (u8 *)regs->ip + cs_base)
12820 printk("<%02x> ", c);
12821 else
12822 printk("%02x ", c);
12823@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12824 {
12825 unsigned short ud2;
12826
12827+ ip = ktla_ktva(ip);
12828 if (ip < PAGE_OFFSET)
12829 return 0;
12830 if (probe_kernel_address((unsigned short *)ip, ud2))
12831diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c
12832--- linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12833+++ linux-2.6.32.45/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12834@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12835 unsigned long *irq_stack_end =
12836 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12837 unsigned used = 0;
12838- struct thread_info *tinfo;
12839 int graph = 0;
12840+ void *stack_start;
12841
12842 if (!task)
12843 task = current;
12844@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12845 * current stack address. If the stacks consist of nested
12846 * exceptions
12847 */
12848- tinfo = task_thread_info(task);
12849 for (;;) {
12850 char *id;
12851 unsigned long *estack_end;
12852+
12853 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12854 &used, &id);
12855
12856@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12857 if (ops->stack(data, id) < 0)
12858 break;
12859
12860- bp = print_context_stack(tinfo, stack, bp, ops,
12861+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12862 data, estack_end, &graph);
12863 ops->stack(data, "<EOE>");
12864 /*
12865@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12866 if (stack >= irq_stack && stack < irq_stack_end) {
12867 if (ops->stack(data, "IRQ") < 0)
12868 break;
12869- bp = print_context_stack(tinfo, stack, bp,
12870+ bp = print_context_stack(task, irq_stack, stack, bp,
12871 ops, data, irq_stack_end, &graph);
12872 /*
12873 * We link to the next stack (which would be
12874@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12875 /*
12876 * This handles the process stack:
12877 */
12878- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12879+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12880+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12881 put_cpu();
12882 }
12883 EXPORT_SYMBOL(dump_trace);
12884diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.c linux-2.6.32.45/arch/x86/kernel/dumpstack.c
12885--- linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12886+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12887@@ -2,6 +2,9 @@
12888 * Copyright (C) 1991, 1992 Linus Torvalds
12889 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12890 */
12891+#ifdef CONFIG_GRKERNSEC_HIDESYM
12892+#define __INCLUDED_BY_HIDESYM 1
12893+#endif
12894 #include <linux/kallsyms.h>
12895 #include <linux/kprobes.h>
12896 #include <linux/uaccess.h>
12897@@ -28,7 +31,7 @@ static int die_counter;
12898
12899 void printk_address(unsigned long address, int reliable)
12900 {
12901- printk(" [<%p>] %s%pS\n", (void *) address,
12902+ printk(" [<%p>] %s%pA\n", (void *) address,
12903 reliable ? "" : "? ", (void *) address);
12904 }
12905
12906@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12907 static void
12908 print_ftrace_graph_addr(unsigned long addr, void *data,
12909 const struct stacktrace_ops *ops,
12910- struct thread_info *tinfo, int *graph)
12911+ struct task_struct *task, int *graph)
12912 {
12913- struct task_struct *task = tinfo->task;
12914 unsigned long ret_addr;
12915 int index = task->curr_ret_stack;
12916
12917@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12918 static inline void
12919 print_ftrace_graph_addr(unsigned long addr, void *data,
12920 const struct stacktrace_ops *ops,
12921- struct thread_info *tinfo, int *graph)
12922+ struct task_struct *task, int *graph)
12923 { }
12924 #endif
12925
12926@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12927 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12928 */
12929
12930-static inline int valid_stack_ptr(struct thread_info *tinfo,
12931- void *p, unsigned int size, void *end)
12932+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12933 {
12934- void *t = tinfo;
12935 if (end) {
12936 if (p < end && p >= (end-THREAD_SIZE))
12937 return 1;
12938@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12939 }
12940
12941 unsigned long
12942-print_context_stack(struct thread_info *tinfo,
12943+print_context_stack(struct task_struct *task, void *stack_start,
12944 unsigned long *stack, unsigned long bp,
12945 const struct stacktrace_ops *ops, void *data,
12946 unsigned long *end, int *graph)
12947 {
12948 struct stack_frame *frame = (struct stack_frame *)bp;
12949
12950- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12951+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12952 unsigned long addr;
12953
12954 addr = *stack;
12955@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12956 } else {
12957 ops->address(data, addr, 0);
12958 }
12959- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12960+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12961 }
12962 stack++;
12963 }
12964@@ -180,7 +180,7 @@ void dump_stack(void)
12965 #endif
12966
12967 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12968- current->pid, current->comm, print_tainted(),
12969+ task_pid_nr(current), current->comm, print_tainted(),
12970 init_utsname()->release,
12971 (int)strcspn(init_utsname()->version, " "),
12972 init_utsname()->version);
12973@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12974 return flags;
12975 }
12976
12977+extern void gr_handle_kernel_exploit(void);
12978+
12979 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12980 {
12981 if (regs && kexec_should_crash(current))
12982@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12983 panic("Fatal exception in interrupt");
12984 if (panic_on_oops)
12985 panic("Fatal exception");
12986- do_exit(signr);
12987+
12988+ gr_handle_kernel_exploit();
12989+
12990+ do_group_exit(signr);
12991 }
12992
12993 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12994@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12995 unsigned long flags = oops_begin();
12996 int sig = SIGSEGV;
12997
12998- if (!user_mode_vm(regs))
12999+ if (!user_mode(regs))
13000 report_bug(regs->ip, regs);
13001
13002 if (__die(str, regs, err))
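
The dumpstack changes above drop the thread_info-based bookkeeping: the walker now derives the base of the current stack by masking the stack pointer down to the THREAD_SIZE boundary and passes that base, plus the owning task, to print_context_stack(). A userspace sketch of the masking and bounds check (the THREAD_SIZE value is an assumption; it is per-architecture in the kernel):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* assumed; per-architecture in the kernel */

static void *stack_base(const void *sp)
{
        return (void *)((uintptr_t)sp & ~(THREAD_SIZE - 1));
}

static int valid_stack_ptr(const void *base, const void *p, size_t size)
{
        /* p must lie inside [base, base + THREAD_SIZE - size] */
        return (const char *)p >= (const char *)base &&
               (const char *)p <= (const char *)base + THREAD_SIZE - size;
}

int main(void)
{
        unsigned long dummy = 0;
        void *base = stack_base(&dummy);

        printf("stack base %p, &dummy %p, valid=%d\n",
               base, (void *)&dummy,
               valid_stack_ptr(base, &dummy, sizeof(dummy)));
        return 0;
}
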
13003diff -urNp linux-2.6.32.45/arch/x86/kernel/dumpstack.h linux-2.6.32.45/arch/x86/kernel/dumpstack.h
13004--- linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
13005+++ linux-2.6.32.45/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
13006@@ -15,7 +15,7 @@
13007 #endif
13008
13009 extern unsigned long
13010-print_context_stack(struct thread_info *tinfo,
13011+print_context_stack(struct task_struct *task, void *stack_start,
13012 unsigned long *stack, unsigned long bp,
13013 const struct stacktrace_ops *ops, void *data,
13014 unsigned long *end, int *graph);
13015diff -urNp linux-2.6.32.45/arch/x86/kernel/e820.c linux-2.6.32.45/arch/x86/kernel/e820.c
13016--- linux-2.6.32.45/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
13017+++ linux-2.6.32.45/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
13018@@ -733,7 +733,7 @@ struct early_res {
13019 };
13020 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
13021 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
13022- {}
13023+ { 0, 0, {0}, 0 }
13024 };
13025
13026 static int __init find_overlapped_early(u64 start, u64 end)
13027diff -urNp linux-2.6.32.45/arch/x86/kernel/early_printk.c linux-2.6.32.45/arch/x86/kernel/early_printk.c
13028--- linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
13029+++ linux-2.6.32.45/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
13030@@ -7,6 +7,7 @@
13031 #include <linux/pci_regs.h>
13032 #include <linux/pci_ids.h>
13033 #include <linux/errno.h>
13034+#include <linux/sched.h>
13035 #include <asm/io.h>
13036 #include <asm/processor.h>
13037 #include <asm/fcntl.h>
13038@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
13039 int n;
13040 va_list ap;
13041
13042+ pax_track_stack();
13043+
13044 va_start(ap, fmt);
13045 n = vscnprintf(buf, sizeof(buf), fmt, ap);
13046 early_console->write(early_console, buf, n);
13047diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_32.c linux-2.6.32.45/arch/x86/kernel/efi_32.c
13048--- linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
13049+++ linux-2.6.32.45/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
13050@@ -38,70 +38,38 @@
13051 */
13052
13053 static unsigned long efi_rt_eflags;
13054-static pgd_t efi_bak_pg_dir_pointer[2];
13055+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
13056
13057-void efi_call_phys_prelog(void)
13058+void __init efi_call_phys_prelog(void)
13059 {
13060- unsigned long cr4;
13061- unsigned long temp;
13062 struct desc_ptr gdt_descr;
13063
13064 local_irq_save(efi_rt_eflags);
13065
13066- /*
13067- * If I don't have PAE, I should just duplicate two entries in page
13068- * directory. If I have PAE, I just need to duplicate one entry in
13069- * page directory.
13070- */
13071- cr4 = read_cr4_safe();
13072
13073- if (cr4 & X86_CR4_PAE) {
13074- efi_bak_pg_dir_pointer[0].pgd =
13075- swapper_pg_dir[pgd_index(0)].pgd;
13076- swapper_pg_dir[0].pgd =
13077- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13078- } else {
13079- efi_bak_pg_dir_pointer[0].pgd =
13080- swapper_pg_dir[pgd_index(0)].pgd;
13081- efi_bak_pg_dir_pointer[1].pgd =
13082- swapper_pg_dir[pgd_index(0x400000)].pgd;
13083- swapper_pg_dir[pgd_index(0)].pgd =
13084- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
13085- temp = PAGE_OFFSET + 0x400000;
13086- swapper_pg_dir[pgd_index(0x400000)].pgd =
13087- swapper_pg_dir[pgd_index(temp)].pgd;
13088- }
13089+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
13090+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13091+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13092
13093 /*
13094 * After the lock is released, the original page table is restored.
13095 */
13096 __flush_tlb_all();
13097
13098- gdt_descr.address = __pa(get_cpu_gdt_table(0));
13099+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
13100 gdt_descr.size = GDT_SIZE - 1;
13101 load_gdt(&gdt_descr);
13102 }
13103
13104-void efi_call_phys_epilog(void)
13105+void __init efi_call_phys_epilog(void)
13106 {
13107- unsigned long cr4;
13108 struct desc_ptr gdt_descr;
13109
13110- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
13111+ gdt_descr.address = get_cpu_gdt_table(0);
13112 gdt_descr.size = GDT_SIZE - 1;
13113 load_gdt(&gdt_descr);
13114
13115- cr4 = read_cr4_safe();
13116-
13117- if (cr4 & X86_CR4_PAE) {
13118- swapper_pg_dir[pgd_index(0)].pgd =
13119- efi_bak_pg_dir_pointer[0].pgd;
13120- } else {
13121- swapper_pg_dir[pgd_index(0)].pgd =
13122- efi_bak_pg_dir_pointer[0].pgd;
13123- swapper_pg_dir[pgd_index(0x400000)].pgd =
13124- efi_bak_pg_dir_pointer[1].pgd;
13125- }
13126+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13127
13128 /*
13129 * After the lock is released, the original page table is restored.
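
The efi_32.c rewrite above replaces the hand-rolled PAE/non-PAE cases with a single save/restore of the kernel page-directory range via clone_pgd_range(), and marks the prolog/epilog __init since they only run during boot. A toy sketch of that save, remap, restore pattern (plain arrays stand in for pgd_t; the values are made up):

#include <stdio.h>
#include <string.h>

#define PGD_PTRS 4

static unsigned long swapper_pgd[PGD_PTRS] = { 0x1, 0x2, 0x3, 0x4 };
static unsigned long backup_pgd[PGD_PTRS];

static void clone_pgd_range(unsigned long *dst, const unsigned long *src, int count)
{
        memcpy(dst, src, count * sizeof(*dst));
}

int main(void)
{
        clone_pgd_range(backup_pgd, swapper_pgd, PGD_PTRS);    /* prolog: save    */
        memset(swapper_pgd, 0, sizeof(swapper_pgd));           /* temporary map   */
        clone_pgd_range(swapper_pgd, backup_pgd, PGD_PTRS);    /* epilog: restore */

        printf("restored entry 0: %#lx\n", swapper_pgd[0]);
        return 0;
}
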
13130diff -urNp linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S
13131--- linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13132+++ linux-2.6.32.45/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13133@@ -6,6 +6,7 @@
13134 */
13135
13136 #include <linux/linkage.h>
13137+#include <linux/init.h>
13138 #include <asm/page_types.h>
13139
13140 /*
13141@@ -20,7 +21,7 @@
13142 * service functions will comply with gcc calling convention, too.
13143 */
13144
13145-.text
13146+__INIT
13147 ENTRY(efi_call_phys)
13148 /*
13149 * 0. The function can only be called in Linux kernel. So CS has been
13150@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13151 * The mapping of lower virtual memory has been created in prelog and
13152 * epilog.
13153 */
13154- movl $1f, %edx
13155- subl $__PAGE_OFFSET, %edx
13156- jmp *%edx
13157+ jmp 1f-__PAGE_OFFSET
13158 1:
13159
13160 /*
13161@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13162 * parameter 2, ..., param n. To make things easy, we save the return
13163 * address of efi_call_phys in a global variable.
13164 */
13165- popl %edx
13166- movl %edx, saved_return_addr
13167- /* get the function pointer into ECX*/
13168- popl %ecx
13169- movl %ecx, efi_rt_function_ptr
13170- movl $2f, %edx
13171- subl $__PAGE_OFFSET, %edx
13172- pushl %edx
13173+ popl (saved_return_addr)
13174+ popl (efi_rt_function_ptr)
13175
13176 /*
13177 * 3. Clear PG bit in %CR0.
13178@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13179 /*
13180 * 5. Call the physical function.
13181 */
13182- jmp *%ecx
13183+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13184
13185-2:
13186 /*
13187 * 6. After EFI runtime service returns, control will return to
13188 * following instruction. We'd better readjust stack pointer first.
13189@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13190 movl %cr0, %edx
13191 orl $0x80000000, %edx
13192 movl %edx, %cr0
13193- jmp 1f
13194-1:
13195+
13196 /*
13197 * 8. Now restore the virtual mode from flat mode by
13198 * adding EIP with PAGE_OFFSET.
13199 */
13200- movl $1f, %edx
13201- jmp *%edx
13202+ jmp 1f+__PAGE_OFFSET
13203 1:
13204
13205 /*
13206 * 9. Balance the stack. And because EAX contain the return value,
13207 * we'd better not clobber it.
13208 */
13209- leal efi_rt_function_ptr, %edx
13210- movl (%edx), %ecx
13211- pushl %ecx
13212+ pushl (efi_rt_function_ptr)
13213
13214 /*
13215- * 10. Push the saved return address onto the stack and return.
13216+ * 10. Return to the saved return address.
13217 */
13218- leal saved_return_addr, %edx
13219- movl (%edx), %ecx
13220- pushl %ecx
13221- ret
13222+ jmpl *(saved_return_addr)
13223 ENDPROC(efi_call_phys)
13224 .previous
13225
13226-.data
13227+__INITDATA
13228 saved_return_addr:
13229 .long 0
13230 efi_rt_function_ptr:
13231diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_32.S linux-2.6.32.45/arch/x86/kernel/entry_32.S
13232--- linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13233+++ linux-2.6.32.45/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13234@@ -185,13 +185,146 @@
13235 /*CFI_REL_OFFSET gs, PT_GS*/
13236 .endm
13237 .macro SET_KERNEL_GS reg
13238+
13239+#ifdef CONFIG_CC_STACKPROTECTOR
13240 movl $(__KERNEL_STACK_CANARY), \reg
13241+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13242+ movl $(__USER_DS), \reg
13243+#else
13244+ xorl \reg, \reg
13245+#endif
13246+
13247 movl \reg, %gs
13248 .endm
13249
13250 #endif /* CONFIG_X86_32_LAZY_GS */
13251
13252-.macro SAVE_ALL
13253+.macro pax_enter_kernel
13254+#ifdef CONFIG_PAX_KERNEXEC
13255+ call pax_enter_kernel
13256+#endif
13257+.endm
13258+
13259+.macro pax_exit_kernel
13260+#ifdef CONFIG_PAX_KERNEXEC
13261+ call pax_exit_kernel
13262+#endif
13263+.endm
13264+
13265+#ifdef CONFIG_PAX_KERNEXEC
13266+ENTRY(pax_enter_kernel)
13267+#ifdef CONFIG_PARAVIRT
13268+ pushl %eax
13269+ pushl %ecx
13270+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13271+ mov %eax, %esi
13272+#else
13273+ mov %cr0, %esi
13274+#endif
13275+ bts $16, %esi
13276+ jnc 1f
13277+ mov %cs, %esi
13278+ cmp $__KERNEL_CS, %esi
13279+ jz 3f
13280+ ljmp $__KERNEL_CS, $3f
13281+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13282+2:
13283+#ifdef CONFIG_PARAVIRT
13284+ mov %esi, %eax
13285+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13286+#else
13287+ mov %esi, %cr0
13288+#endif
13289+3:
13290+#ifdef CONFIG_PARAVIRT
13291+ popl %ecx
13292+ popl %eax
13293+#endif
13294+ ret
13295+ENDPROC(pax_enter_kernel)
13296+
13297+ENTRY(pax_exit_kernel)
13298+#ifdef CONFIG_PARAVIRT
13299+ pushl %eax
13300+ pushl %ecx
13301+#endif
13302+ mov %cs, %esi
13303+ cmp $__KERNEXEC_KERNEL_CS, %esi
13304+ jnz 2f
13305+#ifdef CONFIG_PARAVIRT
13306+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13307+ mov %eax, %esi
13308+#else
13309+ mov %cr0, %esi
13310+#endif
13311+ btr $16, %esi
13312+ ljmp $__KERNEL_CS, $1f
13313+1:
13314+#ifdef CONFIG_PARAVIRT
13315+ mov %esi, %eax
13316+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13317+#else
13318+ mov %esi, %cr0
13319+#endif
13320+2:
13321+#ifdef CONFIG_PARAVIRT
13322+ popl %ecx
13323+ popl %eax
13324+#endif
13325+ ret
13326+ENDPROC(pax_exit_kernel)
13327+#endif
13328+
13329+.macro pax_erase_kstack
13330+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13331+ call pax_erase_kstack
13332+#endif
13333+.endm
13334+
13335+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13336+/*
13337+ * ebp: thread_info
13338+ * ecx, edx: can be clobbered
13339+ */
13340+ENTRY(pax_erase_kstack)
13341+ pushl %edi
13342+ pushl %eax
13343+
13344+ mov TI_lowest_stack(%ebp), %edi
13345+ mov $-0xBEEF, %eax
13346+ std
13347+
13348+1: mov %edi, %ecx
13349+ and $THREAD_SIZE_asm - 1, %ecx
13350+ shr $2, %ecx
13351+ repne scasl
13352+ jecxz 2f
13353+
13354+ cmp $2*16, %ecx
13355+ jc 2f
13356+
13357+ mov $2*16, %ecx
13358+ repe scasl
13359+ jecxz 2f
13360+ jne 1b
13361+
13362+2: cld
13363+ mov %esp, %ecx
13364+ sub %edi, %ecx
13365+ shr $2, %ecx
13366+ rep stosl
13367+
13368+ mov TI_task_thread_sp0(%ebp), %edi
13369+ sub $128, %edi
13370+ mov %edi, TI_lowest_stack(%ebp)
13371+
13372+ popl %eax
13373+ popl %edi
13374+ ret
13375+ENDPROC(pax_erase_kstack)
13376+#endif
13377+
13378+.macro __SAVE_ALL _DS
13379 cld
13380 PUSH_GS
13381 pushl %fs
13382@@ -224,7 +357,7 @@
13383 pushl %ebx
13384 CFI_ADJUST_CFA_OFFSET 4
13385 CFI_REL_OFFSET ebx, 0
13386- movl $(__USER_DS), %edx
13387+ movl $\_DS, %edx
13388 movl %edx, %ds
13389 movl %edx, %es
13390 movl $(__KERNEL_PERCPU), %edx
13391@@ -232,6 +365,15 @@
13392 SET_KERNEL_GS %edx
13393 .endm
13394
13395+.macro SAVE_ALL
13396+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13397+ __SAVE_ALL __KERNEL_DS
13398+ pax_enter_kernel
13399+#else
13400+ __SAVE_ALL __USER_DS
13401+#endif
13402+.endm
13403+
13404 .macro RESTORE_INT_REGS
13405 popl %ebx
13406 CFI_ADJUST_CFA_OFFSET -4
13407@@ -352,7 +494,15 @@ check_userspace:
13408 movb PT_CS(%esp), %al
13409 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13410 cmpl $USER_RPL, %eax
13411+
13412+#ifdef CONFIG_PAX_KERNEXEC
13413+ jae resume_userspace
13414+
13415+ PAX_EXIT_KERNEL
13416+ jmp resume_kernel
13417+#else
13418 jb resume_kernel # not returning to v8086 or userspace
13419+#endif
13420
13421 ENTRY(resume_userspace)
13422 LOCKDEP_SYS_EXIT
13423@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13424 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13425 # int/exception return?
13426 jne work_pending
13427- jmp restore_all
13428+ jmp restore_all_pax
13429 END(ret_from_exception)
13430
13431 #ifdef CONFIG_PREEMPT
13432@@ -414,25 +564,36 @@ sysenter_past_esp:
13433 /*CFI_REL_OFFSET cs, 0*/
13434 /*
13435 * Push current_thread_info()->sysenter_return to the stack.
13436- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13437- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13438 */
13439- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13440+ pushl $0
13441 CFI_ADJUST_CFA_OFFSET 4
13442 CFI_REL_OFFSET eip, 0
13443
13444 pushl %eax
13445 CFI_ADJUST_CFA_OFFSET 4
13446 SAVE_ALL
13447+ GET_THREAD_INFO(%ebp)
13448+ movl TI_sysenter_return(%ebp),%ebp
13449+ movl %ebp,PT_EIP(%esp)
13450 ENABLE_INTERRUPTS(CLBR_NONE)
13451
13452 /*
13453 * Load the potential sixth argument from user stack.
13454 * Careful about security.
13455 */
13456+ movl PT_OLDESP(%esp),%ebp
13457+
13458+#ifdef CONFIG_PAX_MEMORY_UDEREF
13459+ mov PT_OLDSS(%esp),%ds
13460+1: movl %ds:(%ebp),%ebp
13461+ push %ss
13462+ pop %ds
13463+#else
13464 cmpl $__PAGE_OFFSET-3,%ebp
13465 jae syscall_fault
13466 1: movl (%ebp),%ebp
13467+#endif
13468+
13469 movl %ebp,PT_EBP(%esp)
13470 .section __ex_table,"a"
13471 .align 4
13472@@ -455,12 +616,23 @@ sysenter_do_call:
13473 testl $_TIF_ALLWORK_MASK, %ecx
13474 jne sysexit_audit
13475 sysenter_exit:
13476+
13477+#ifdef CONFIG_PAX_RANDKSTACK
13478+ pushl_cfi %eax
13479+ call pax_randomize_kstack
13480+ popl_cfi %eax
13481+#endif
13482+
13483+ pax_erase_kstack
13484+
13485 /* if something modifies registers it must also disable sysexit */
13486 movl PT_EIP(%esp), %edx
13487 movl PT_OLDESP(%esp), %ecx
13488 xorl %ebp,%ebp
13489 TRACE_IRQS_ON
13490 1: mov PT_FS(%esp), %fs
13491+2: mov PT_DS(%esp), %ds
13492+3: mov PT_ES(%esp), %es
13493 PTGS_TO_GS
13494 ENABLE_INTERRUPTS_SYSEXIT
13495
13496@@ -477,6 +649,9 @@ sysenter_audit:
13497 movl %eax,%edx /* 2nd arg: syscall number */
13498 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13499 call audit_syscall_entry
13500+
13501+ pax_erase_kstack
13502+
13503 pushl %ebx
13504 CFI_ADJUST_CFA_OFFSET 4
13505 movl PT_EAX(%esp),%eax /* reload syscall number */
13506@@ -504,11 +679,17 @@ sysexit_audit:
13507
13508 CFI_ENDPROC
13509 .pushsection .fixup,"ax"
13510-2: movl $0,PT_FS(%esp)
13511+4: movl $0,PT_FS(%esp)
13512+ jmp 1b
13513+5: movl $0,PT_DS(%esp)
13514+ jmp 1b
13515+6: movl $0,PT_ES(%esp)
13516 jmp 1b
13517 .section __ex_table,"a"
13518 .align 4
13519- .long 1b,2b
13520+ .long 1b,4b
13521+ .long 2b,5b
13522+ .long 3b,6b
13523 .popsection
13524 PTGS_TO_GS_EX
13525 ENDPROC(ia32_sysenter_target)
13526@@ -538,6 +719,14 @@ syscall_exit:
13527 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13528 jne syscall_exit_work
13529
13530+restore_all_pax:
13531+
13532+#ifdef CONFIG_PAX_RANDKSTACK
13533+ call pax_randomize_kstack
13534+#endif
13535+
13536+ pax_erase_kstack
13537+
13538 restore_all:
13539 TRACE_IRQS_IRET
13540 restore_all_notrace:
13541@@ -602,7 +791,13 @@ ldt_ss:
13542 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13543 mov %dx, %ax /* eax: new kernel esp */
13544 sub %eax, %edx /* offset (low word is 0) */
13545- PER_CPU(gdt_page, %ebx)
13546+#ifdef CONFIG_SMP
13547+ movl PER_CPU_VAR(cpu_number), %ebx
13548+ shll $PAGE_SHIFT_asm, %ebx
13549+ addl $cpu_gdt_table, %ebx
13550+#else
13551+ movl $cpu_gdt_table, %ebx
13552+#endif
13553 shr $16, %edx
13554 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13555 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13556@@ -636,31 +831,25 @@ work_resched:
13557 movl TI_flags(%ebp), %ecx
13558 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13559 # than syscall tracing?
13560- jz restore_all
13561+ jz restore_all_pax
13562 testb $_TIF_NEED_RESCHED, %cl
13563 jnz work_resched
13564
13565 work_notifysig: # deal with pending signals and
13566 # notify-resume requests
13567+ movl %esp, %eax
13568 #ifdef CONFIG_VM86
13569 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13570- movl %esp, %eax
13571- jne work_notifysig_v86 # returning to kernel-space or
13572+ jz 1f # returning to kernel-space or
13573 # vm86-space
13574- xorl %edx, %edx
13575- call do_notify_resume
13576- jmp resume_userspace_sig
13577
13578- ALIGN
13579-work_notifysig_v86:
13580 pushl %ecx # save ti_flags for do_notify_resume
13581 CFI_ADJUST_CFA_OFFSET 4
13582 call save_v86_state # %eax contains pt_regs pointer
13583 popl %ecx
13584 CFI_ADJUST_CFA_OFFSET -4
13585 movl %eax, %esp
13586-#else
13587- movl %esp, %eax
13588+1:
13589 #endif
13590 xorl %edx, %edx
13591 call do_notify_resume
13592@@ -673,6 +862,9 @@ syscall_trace_entry:
13593 movl $-ENOSYS,PT_EAX(%esp)
13594 movl %esp, %eax
13595 call syscall_trace_enter
13596+
13597+ pax_erase_kstack
13598+
13599 /* What it returned is what we'll actually use. */
13600 cmpl $(nr_syscalls), %eax
13601 jnae syscall_call
13602@@ -695,6 +887,10 @@ END(syscall_exit_work)
13603
13604 RING0_INT_FRAME # can't unwind into user space anyway
13605 syscall_fault:
13606+#ifdef CONFIG_PAX_MEMORY_UDEREF
13607+ push %ss
13608+ pop %ds
13609+#endif
13610 GET_THREAD_INFO(%ebp)
13611 movl $-EFAULT,PT_EAX(%esp)
13612 jmp resume_userspace
13613@@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13614 PTREGSCALL(vm86)
13615 PTREGSCALL(vm86old)
13616
13617+ ALIGN;
13618+ENTRY(kernel_execve)
13619+ push %ebp
13620+ sub $PT_OLDSS+4,%esp
13621+ push %edi
13622+ push %ecx
13623+ push %eax
13624+ lea 3*4(%esp),%edi
13625+ mov $PT_OLDSS/4+1,%ecx
13626+ xorl %eax,%eax
13627+ rep stosl
13628+ pop %eax
13629+ pop %ecx
13630+ pop %edi
13631+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13632+ mov %eax,PT_EBX(%esp)
13633+ mov %edx,PT_ECX(%esp)
13634+ mov %ecx,PT_EDX(%esp)
13635+ mov %esp,%eax
13636+ call sys_execve
13637+ GET_THREAD_INFO(%ebp)
13638+ test %eax,%eax
13639+ jz syscall_exit
13640+ add $PT_OLDSS+4,%esp
13641+ pop %ebp
13642+ ret
13643+
13644 .macro FIXUP_ESPFIX_STACK
13645 /*
13646 * Switch back for ESPFIX stack to the normal zerobased stack
13647@@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13648 * normal stack and adjusts ESP with the matching offset.
13649 */
13650 /* fixup the stack */
13651- PER_CPU(gdt_page, %ebx)
13652+#ifdef CONFIG_SMP
13653+ movl PER_CPU_VAR(cpu_number), %ebx
13654+ shll $PAGE_SHIFT_asm, %ebx
13655+ addl $cpu_gdt_table, %ebx
13656+#else
13657+ movl $cpu_gdt_table, %ebx
13658+#endif
13659 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13660 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13661 shl $16, %eax
13662@@ -1198,7 +1427,6 @@ return_to_handler:
13663 ret
13664 #endif
13665
13666-.section .rodata,"a"
13667 #include "syscall_table_32.S"
13668
13669 syscall_table_size=(.-sys_call_table)
13670@@ -1255,9 +1483,12 @@ error_code:
13671 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13672 REG_TO_PTGS %ecx
13673 SET_KERNEL_GS %ecx
13674- movl $(__USER_DS), %ecx
13675+ movl $(__KERNEL_DS), %ecx
13676 movl %ecx, %ds
13677 movl %ecx, %es
13678+
13679+ pax_enter_kernel
13680+
13681 TRACE_IRQS_OFF
13682 movl %esp,%eax # pt_regs pointer
13683 call *%edi
13684@@ -1351,6 +1582,9 @@ nmi_stack_correct:
13685 xorl %edx,%edx # zero error code
13686 movl %esp,%eax # pt_regs pointer
13687 call do_nmi
13688+
13689+ pax_exit_kernel
13690+
13691 jmp restore_all_notrace
13692 CFI_ENDPROC
13693
13694@@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13695 FIXUP_ESPFIX_STACK # %eax == %esp
13696 xorl %edx,%edx # zero error code
13697 call do_nmi
13698+
13699+ pax_exit_kernel
13700+
13701 RESTORE_REGS
13702 lss 12+4(%esp), %esp # back to espfix stack
13703 CFI_ADJUST_CFA_OFFSET -24
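
Beyond the KERNEXEC entry/exit stubs, the entry_32.S changes add pax_erase_kstack, which on the way back to userland overwrites the used portion of the kernel stack with a 0xBEEF poison pattern so stale data cannot leak into later stack frames. A much-simplified C sketch of the idea (the real routine first scans for the previous poison boundary, which is skipped here):

#include <stdio.h>
#include <string.h>

#define STACK_WORDS 64
#define POISON      0xBEEFBEEFu

static unsigned int kstack[STACK_WORDS];

static void erase_kstack(unsigned int *lowest_used, unsigned int *sp)
{
        /* poison everything between the deepest point the stack reached
         * and the current stack pointer */
        for (unsigned int *p = lowest_used; p < sp; p++)
                *p = POISON;
}

int main(void)
{
        /* pretend a syscall dirtied the first 16 words of the kernel stack */
        memset(kstack, 0x41, 16 * sizeof(unsigned int));
        erase_kstack(&kstack[0], &kstack[16]);
        printf("kstack[0] after erase: %#x\n", kstack[0]);
        return 0;
}
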
13704diff -urNp linux-2.6.32.45/arch/x86/kernel/entry_64.S linux-2.6.32.45/arch/x86/kernel/entry_64.S
13705--- linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13706+++ linux-2.6.32.45/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13707@@ -53,6 +53,7 @@
13708 #include <asm/paravirt.h>
13709 #include <asm/ftrace.h>
13710 #include <asm/percpu.h>
13711+#include <asm/pgtable.h>
13712
13713 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13714 #include <linux/elf-em.h>
13715@@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13716 ENDPROC(native_usergs_sysret64)
13717 #endif /* CONFIG_PARAVIRT */
13718
13719+ .macro ljmpq sel, off
13720+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13721+ .byte 0x48; ljmp *1234f(%rip)
13722+ .pushsection .rodata
13723+ .align 16
13724+ 1234: .quad \off; .word \sel
13725+ .popsection
13726+#else
13727+ pushq $\sel
13728+ pushq $\off
13729+ lretq
13730+#endif
13731+ .endm
13732+
13733+ .macro pax_enter_kernel
13734+#ifdef CONFIG_PAX_KERNEXEC
13735+ call pax_enter_kernel
13736+#endif
13737+ .endm
13738+
13739+ .macro pax_exit_kernel
13740+#ifdef CONFIG_PAX_KERNEXEC
13741+ call pax_exit_kernel
13742+#endif
13743+ .endm
13744+
13745+#ifdef CONFIG_PAX_KERNEXEC
13746+ENTRY(pax_enter_kernel)
13747+ pushq %rdi
13748+
13749+#ifdef CONFIG_PARAVIRT
13750+ PV_SAVE_REGS(CLBR_RDI)
13751+#endif
13752+
13753+ GET_CR0_INTO_RDI
13754+ bts $16,%rdi
13755+ jnc 1f
13756+ mov %cs,%edi
13757+ cmp $__KERNEL_CS,%edi
13758+ jz 3f
13759+ ljmpq __KERNEL_CS,3f
13760+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13761+2: SET_RDI_INTO_CR0
13762+3:
13763+
13764+#ifdef CONFIG_PARAVIRT
13765+ PV_RESTORE_REGS(CLBR_RDI)
13766+#endif
13767+
13768+ popq %rdi
13769+ retq
13770+ENDPROC(pax_enter_kernel)
13771+
13772+ENTRY(pax_exit_kernel)
13773+ pushq %rdi
13774+
13775+#ifdef CONFIG_PARAVIRT
13776+ PV_SAVE_REGS(CLBR_RDI)
13777+#endif
13778+
13779+ mov %cs,%rdi
13780+ cmp $__KERNEXEC_KERNEL_CS,%edi
13781+ jnz 2f
13782+ GET_CR0_INTO_RDI
13783+ btr $16,%rdi
13784+ ljmpq __KERNEL_CS,1f
13785+1: SET_RDI_INTO_CR0
13786+2:
13787+
13788+#ifdef CONFIG_PARAVIRT
13789+ PV_RESTORE_REGS(CLBR_RDI);
13790+#endif
13791+
13792+ popq %rdi
13793+ retq
13794+ENDPROC(pax_exit_kernel)
13795+#endif
13796+
13797+ .macro pax_enter_kernel_user
13798+#ifdef CONFIG_PAX_MEMORY_UDEREF
13799+ call pax_enter_kernel_user
13800+#endif
13801+ .endm
13802+
13803+ .macro pax_exit_kernel_user
13804+#ifdef CONFIG_PAX_MEMORY_UDEREF
13805+ call pax_exit_kernel_user
13806+#endif
13807+#ifdef CONFIG_PAX_RANDKSTACK
13808+ push %rax
13809+ call pax_randomize_kstack
13810+ pop %rax
13811+#endif
13812+ pax_erase_kstack
13813+ .endm
13814+
13815+#ifdef CONFIG_PAX_MEMORY_UDEREF
13816+ENTRY(pax_enter_kernel_user)
13817+ pushq %rdi
13818+ pushq %rbx
13819+
13820+#ifdef CONFIG_PARAVIRT
13821+ PV_SAVE_REGS(CLBR_RDI)
13822+#endif
13823+
13824+ GET_CR3_INTO_RDI
13825+ mov %rdi,%rbx
13826+ add $__START_KERNEL_map,%rbx
13827+ sub phys_base(%rip),%rbx
13828+
13829+#ifdef CONFIG_PARAVIRT
13830+ pushq %rdi
13831+ cmpl $0, pv_info+PARAVIRT_enabled
13832+ jz 1f
13833+ i = 0
13834+ .rept USER_PGD_PTRS
13835+ mov i*8(%rbx),%rsi
13836+ mov $0,%sil
13837+ lea i*8(%rbx),%rdi
13838+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13839+ i = i + 1
13840+ .endr
13841+ jmp 2f
13842+1:
13843+#endif
13844+
13845+ i = 0
13846+ .rept USER_PGD_PTRS
13847+ movb $0,i*8(%rbx)
13848+ i = i + 1
13849+ .endr
13850+
13851+#ifdef CONFIG_PARAVIRT
13852+2: popq %rdi
13853+#endif
13854+ SET_RDI_INTO_CR3
13855+
13856+#ifdef CONFIG_PAX_KERNEXEC
13857+ GET_CR0_INTO_RDI
13858+ bts $16,%rdi
13859+ SET_RDI_INTO_CR0
13860+#endif
13861+
13862+#ifdef CONFIG_PARAVIRT
13863+ PV_RESTORE_REGS(CLBR_RDI)
13864+#endif
13865+
13866+ popq %rbx
13867+ popq %rdi
13868+ retq
13869+ENDPROC(pax_enter_kernel_user)
13870+
13871+ENTRY(pax_exit_kernel_user)
13872+ push %rdi
13873+
13874+#ifdef CONFIG_PARAVIRT
13875+ pushq %rbx
13876+ PV_SAVE_REGS(CLBR_RDI)
13877+#endif
13878+
13879+#ifdef CONFIG_PAX_KERNEXEC
13880+ GET_CR0_INTO_RDI
13881+ btr $16,%rdi
13882+ SET_RDI_INTO_CR0
13883+#endif
13884+
13885+ GET_CR3_INTO_RDI
13886+ add $__START_KERNEL_map,%rdi
13887+ sub phys_base(%rip),%rdi
13888+
13889+#ifdef CONFIG_PARAVIRT
13890+ cmpl $0, pv_info+PARAVIRT_enabled
13891+ jz 1f
13892+ mov %rdi,%rbx
13893+ i = 0
13894+ .rept USER_PGD_PTRS
13895+ mov i*8(%rbx),%rsi
13896+ mov $0x67,%sil
13897+ lea i*8(%rbx),%rdi
13898+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13899+ i = i + 1
13900+ .endr
13901+ jmp 2f
13902+1:
13903+#endif
13904+
13905+ i = 0
13906+ .rept USER_PGD_PTRS
13907+ movb $0x67,i*8(%rdi)
13908+ i = i + 1
13909+ .endr
13910+
13911+#ifdef CONFIG_PARAVIRT
13912+2: PV_RESTORE_REGS(CLBR_RDI)
13913+ popq %rbx
13914+#endif
13915+
13916+ popq %rdi
13917+ retq
13918+ENDPROC(pax_exit_kernel_user)
13919+#endif
13920+
13921+.macro pax_erase_kstack
13922+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13923+ call pax_erase_kstack
13924+#endif
13925+.endm
13926+
13927+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13928+/*
13929+ * r10: thread_info
13930+ * rcx, rdx: can be clobbered
13931+ */
13932+ENTRY(pax_erase_kstack)
13933+ pushq %rdi
13934+ pushq %rax
13935+
13936+ GET_THREAD_INFO(%r10)
13937+ mov TI_lowest_stack(%r10), %rdi
13938+ mov $-0xBEEF, %rax
13939+ std
13940+
13941+1: mov %edi, %ecx
13942+ and $THREAD_SIZE_asm - 1, %ecx
13943+ shr $3, %ecx
13944+ repne scasq
13945+ jecxz 2f
13946+
13947+ cmp $2*8, %ecx
13948+ jc 2f
13949+
13950+ mov $2*8, %ecx
13951+ repe scasq
13952+ jecxz 2f
13953+ jne 1b
13954+
13955+2: cld
13956+ mov %esp, %ecx
13957+ sub %edi, %ecx
13958+ shr $3, %ecx
13959+ rep stosq
13960+
13961+ mov TI_task_thread_sp0(%r10), %rdi
13962+ sub $256, %rdi
13963+ mov %rdi, TI_lowest_stack(%r10)
13964+
13965+ popq %rax
13966+ popq %rdi
13967+ ret
13968+ENDPROC(pax_erase_kstack)
13969+#endif
13970
13971 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13972 #ifdef CONFIG_TRACE_IRQFLAGS
13973@@ -317,7 +569,7 @@ ENTRY(save_args)
13974 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13975 movq_cfi rbp, 8 /* push %rbp */
13976 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13977- testl $3, CS(%rdi)
13978+ testb $3, CS(%rdi)
13979 je 1f
13980 SWAPGS
13981 /*
13982@@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13983
13984 RESTORE_REST
13985
13986- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13987+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13988 je int_ret_from_sys_call
13989
13990 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13991@@ -455,7 +707,7 @@ END(ret_from_fork)
13992 ENTRY(system_call)
13993 CFI_STARTPROC simple
13994 CFI_SIGNAL_FRAME
13995- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13996+ CFI_DEF_CFA rsp,0
13997 CFI_REGISTER rip,rcx
13998 /*CFI_REGISTER rflags,r11*/
13999 SWAPGS_UNSAFE_STACK
14000@@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
14001
14002 movq %rsp,PER_CPU_VAR(old_rsp)
14003 movq PER_CPU_VAR(kernel_stack),%rsp
14004+ pax_enter_kernel_user
14005 /*
14006 * No need to follow this irqs off/on section - it's straight
14007 * and short:
14008 */
14009 ENABLE_INTERRUPTS(CLBR_NONE)
14010- SAVE_ARGS 8,1
14011+ SAVE_ARGS 8*6,1
14012 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14013 movq %rcx,RIP-ARGOFFSET(%rsp)
14014 CFI_REL_OFFSET rip,RIP-ARGOFFSET
14015@@ -502,6 +755,7 @@ sysret_check:
14016 andl %edi,%edx
14017 jnz sysret_careful
14018 CFI_REMEMBER_STATE
14019+ pax_exit_kernel_user
14020 /*
14021 * sysretq will re-enable interrupts:
14022 */
14023@@ -562,6 +816,9 @@ auditsys:
14024 movq %rax,%rsi /* 2nd arg: syscall number */
14025 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
14026 call audit_syscall_entry
14027+
14028+ pax_erase_kstack
14029+
14030 LOAD_ARGS 0 /* reload call-clobbered registers */
14031 jmp system_call_fastpath
14032
14033@@ -592,6 +849,9 @@ tracesys:
14034 FIXUP_TOP_OF_STACK %rdi
14035 movq %rsp,%rdi
14036 call syscall_trace_enter
14037+
14038+ pax_erase_kstack
14039+
14040 /*
14041 * Reload arg registers from stack in case ptrace changed them.
14042 * We don't reload %rax because syscall_trace_enter() returned
14043@@ -613,7 +873,7 @@ tracesys:
14044 GLOBAL(int_ret_from_sys_call)
14045 DISABLE_INTERRUPTS(CLBR_NONE)
14046 TRACE_IRQS_OFF
14047- testl $3,CS-ARGOFFSET(%rsp)
14048+ testb $3,CS-ARGOFFSET(%rsp)
14049 je retint_restore_args
14050 movl $_TIF_ALLWORK_MASK,%edi
14051 /* edi: mask to check */
14052@@ -800,6 +1060,16 @@ END(interrupt)
14053 CFI_ADJUST_CFA_OFFSET 10*8
14054 call save_args
14055 PARTIAL_FRAME 0
14056+#ifdef CONFIG_PAX_MEMORY_UDEREF
14057+ testb $3, CS(%rdi)
14058+ jnz 1f
14059+ pax_enter_kernel
14060+ jmp 2f
14061+1: pax_enter_kernel_user
14062+2:
14063+#else
14064+ pax_enter_kernel
14065+#endif
14066 call \func
14067 .endm
14068
14069@@ -822,7 +1092,7 @@ ret_from_intr:
14070 CFI_ADJUST_CFA_OFFSET -8
14071 exit_intr:
14072 GET_THREAD_INFO(%rcx)
14073- testl $3,CS-ARGOFFSET(%rsp)
14074+ testb $3,CS-ARGOFFSET(%rsp)
14075 je retint_kernel
14076
14077 /* Interrupt came from user space */
14078@@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
14079 * The iretq could re-enable interrupts:
14080 */
14081 DISABLE_INTERRUPTS(CLBR_ANY)
14082+ pax_exit_kernel_user
14083 TRACE_IRQS_IRETQ
14084 SWAPGS
14085 jmp restore_args
14086
14087 retint_restore_args: /* return to kernel space */
14088 DISABLE_INTERRUPTS(CLBR_ANY)
14089+ pax_exit_kernel
14090 /*
14091 * The iretq could re-enable interrupts:
14092 */
14093@@ -1032,6 +1304,16 @@ ENTRY(\sym)
14094 CFI_ADJUST_CFA_OFFSET 15*8
14095 call error_entry
14096 DEFAULT_FRAME 0
14097+#ifdef CONFIG_PAX_MEMORY_UDEREF
14098+ testb $3, CS(%rsp)
14099+ jnz 1f
14100+ pax_enter_kernel
14101+ jmp 2f
14102+1: pax_enter_kernel_user
14103+2:
14104+#else
14105+ pax_enter_kernel
14106+#endif
14107 movq %rsp,%rdi /* pt_regs pointer */
14108 xorl %esi,%esi /* no error code */
14109 call \do_sym
14110@@ -1049,6 +1331,16 @@ ENTRY(\sym)
14111 subq $15*8, %rsp
14112 call save_paranoid
14113 TRACE_IRQS_OFF
14114+#ifdef CONFIG_PAX_MEMORY_UDEREF
14115+ testb $3, CS(%rsp)
14116+ jnz 1f
14117+ pax_enter_kernel
14118+ jmp 2f
14119+1: pax_enter_kernel_user
14120+2:
14121+#else
14122+ pax_enter_kernel
14123+#endif
14124 movq %rsp,%rdi /* pt_regs pointer */
14125 xorl %esi,%esi /* no error code */
14126 call \do_sym
14127@@ -1066,9 +1358,24 @@ ENTRY(\sym)
14128 subq $15*8, %rsp
14129 call save_paranoid
14130 TRACE_IRQS_OFF
14131+#ifdef CONFIG_PAX_MEMORY_UDEREF
14132+ testb $3, CS(%rsp)
14133+ jnz 1f
14134+ pax_enter_kernel
14135+ jmp 2f
14136+1: pax_enter_kernel_user
14137+2:
14138+#else
14139+ pax_enter_kernel
14140+#endif
14141 movq %rsp,%rdi /* pt_regs pointer */
14142 xorl %esi,%esi /* no error code */
14143- PER_CPU(init_tss, %rbp)
14144+#ifdef CONFIG_SMP
14145+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14146+ lea init_tss(%rbp), %rbp
14147+#else
14148+ lea init_tss(%rip), %rbp
14149+#endif
14150 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14151 call \do_sym
14152 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14153@@ -1085,6 +1392,16 @@ ENTRY(\sym)
14154 CFI_ADJUST_CFA_OFFSET 15*8
14155 call error_entry
14156 DEFAULT_FRAME 0
14157+#ifdef CONFIG_PAX_MEMORY_UDEREF
14158+ testb $3, CS(%rsp)
14159+ jnz 1f
14160+ pax_enter_kernel
14161+ jmp 2f
14162+1: pax_enter_kernel_user
14163+2:
14164+#else
14165+ pax_enter_kernel
14166+#endif
14167 movq %rsp,%rdi /* pt_regs pointer */
14168 movq ORIG_RAX(%rsp),%rsi /* get error code */
14169 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14170@@ -1104,6 +1421,16 @@ ENTRY(\sym)
14171 call save_paranoid
14172 DEFAULT_FRAME 0
14173 TRACE_IRQS_OFF
14174+#ifdef CONFIG_PAX_MEMORY_UDEREF
14175+ testb $3, CS(%rsp)
14176+ jnz 1f
14177+ pax_enter_kernel
14178+ jmp 2f
14179+1: pax_enter_kernel_user
14180+2:
14181+#else
14182+ pax_enter_kernel
14183+#endif
14184 movq %rsp,%rdi /* pt_regs pointer */
14185 movq ORIG_RAX(%rsp),%rsi /* get error code */
14186 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14187@@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14188 TRACE_IRQS_OFF
14189 testl %ebx,%ebx /* swapgs needed? */
14190 jnz paranoid_restore
14191- testl $3,CS(%rsp)
14192+ testb $3,CS(%rsp)
14193 jnz paranoid_userspace
14194+#ifdef CONFIG_PAX_MEMORY_UDEREF
14195+ pax_exit_kernel
14196+ TRACE_IRQS_IRETQ 0
14197+ SWAPGS_UNSAFE_STACK
14198+ RESTORE_ALL 8
14199+ jmp irq_return
14200+#endif
14201 paranoid_swapgs:
14202+#ifdef CONFIG_PAX_MEMORY_UDEREF
14203+ pax_exit_kernel_user
14204+#else
14205+ pax_exit_kernel
14206+#endif
14207 TRACE_IRQS_IRETQ 0
14208 SWAPGS_UNSAFE_STACK
14209 RESTORE_ALL 8
14210 jmp irq_return
14211 paranoid_restore:
14212+ pax_exit_kernel
14213 TRACE_IRQS_IRETQ 0
14214 RESTORE_ALL 8
14215 jmp irq_return
14216@@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14217 movq_cfi r14, R14+8
14218 movq_cfi r15, R15+8
14219 xorl %ebx,%ebx
14220- testl $3,CS+8(%rsp)
14221+ testb $3,CS+8(%rsp)
14222 je error_kernelspace
14223 error_swapgs:
14224 SWAPGS
14225@@ -1529,6 +1869,16 @@ ENTRY(nmi)
14226 CFI_ADJUST_CFA_OFFSET 15*8
14227 call save_paranoid
14228 DEFAULT_FRAME 0
14229+#ifdef CONFIG_PAX_MEMORY_UDEREF
14230+ testb $3, CS(%rsp)
14231+ jnz 1f
14232+ pax_enter_kernel
14233+ jmp 2f
14234+1: pax_enter_kernel_user
14235+2:
14236+#else
14237+ pax_enter_kernel
14238+#endif
14239 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14240 movq %rsp,%rdi
14241 movq $-1,%rsi
14242@@ -1539,11 +1889,25 @@ ENTRY(nmi)
14243 DISABLE_INTERRUPTS(CLBR_NONE)
14244 testl %ebx,%ebx /* swapgs needed? */
14245 jnz nmi_restore
14246- testl $3,CS(%rsp)
14247+ testb $3,CS(%rsp)
14248 jnz nmi_userspace
14249+#ifdef CONFIG_PAX_MEMORY_UDEREF
14250+ pax_exit_kernel
14251+ SWAPGS_UNSAFE_STACK
14252+ RESTORE_ALL 8
14253+ jmp irq_return
14254+#endif
14255 nmi_swapgs:
14256+#ifdef CONFIG_PAX_MEMORY_UDEREF
14257+ pax_exit_kernel_user
14258+#else
14259+ pax_exit_kernel
14260+#endif
14261 SWAPGS_UNSAFE_STACK
14262+ RESTORE_ALL 8
14263+ jmp irq_return
14264 nmi_restore:
14265+ pax_exit_kernel
14266 RESTORE_ALL 8
14267 jmp irq_return
14268 nmi_userspace:
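
The entry_64.S additions mirror the 32-bit KERNEXEC and STACKLEAK stubs and add the UDEREF pair pax_enter_kernel_user/pax_exit_kernel_user, which blank the userland page-directory slots on kernel entry (movb $0) and restore their flag byte (movb $0x67) on exit, so stray kernel dereferences of user pointers fault while in kernel mode. A toy C sketch of that toggle (array elements stand in for pgd_t; the values are illustrative):

#include <stdio.h>

#define USER_PGD_PTRS 4
#define PGD_FLAGS     0x67UL   /* flag byte the patch writes back on exit */

static unsigned long user_pgd[USER_PGD_PTRS] = { 0x1067, 0x2067, 0x3067, 0x4067 };

static void enter_kernel_user(void)
{
        for (int i = 0; i < USER_PGD_PTRS; i++)
                user_pgd[i] &= ~0xffUL;        /* clear the low flag byte    */
}

static void exit_kernel_user(void)
{
        for (int i = 0; i < USER_PGD_PTRS; i++)
                user_pgd[i] |= PGD_FLAGS;      /* make userland usable again */
}

int main(void)
{
        enter_kernel_user();
        printf("entry 0 while in kernel: %#lx\n", user_pgd[0]);
        exit_kernel_user();
        printf("entry 0 after return:    %#lx\n", user_pgd[0]);
        return 0;
}
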
14269diff -urNp linux-2.6.32.45/arch/x86/kernel/ftrace.c linux-2.6.32.45/arch/x86/kernel/ftrace.c
14270--- linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14271+++ linux-2.6.32.45/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14272@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14273 static void *mod_code_newcode; /* holds the text to write to the IP */
14274
14275 static unsigned nmi_wait_count;
14276-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14277+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14278
14279 int ftrace_arch_read_dyn_info(char *buf, int size)
14280 {
14281@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14282
14283 r = snprintf(buf, size, "%u %u",
14284 nmi_wait_count,
14285- atomic_read(&nmi_update_count));
14286+ atomic_read_unchecked(&nmi_update_count));
14287 return r;
14288 }
14289
14290@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14291 {
14292 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14293 smp_rmb();
14294+ pax_open_kernel();
14295 ftrace_mod_code();
14296- atomic_inc(&nmi_update_count);
14297+ pax_close_kernel();
14298+ atomic_inc_unchecked(&nmi_update_count);
14299 }
14300 /* Must have previous changes seen before executions */
14301 smp_mb();
14302@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14303
14304
14305
14306-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14307+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14308
14309 static unsigned char *ftrace_nop_replace(void)
14310 {
14311@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14312 {
14313 unsigned char replaced[MCOUNT_INSN_SIZE];
14314
14315+ ip = ktla_ktva(ip);
14316+
14317 /*
14318 * Note: Due to modules and __init, code can
14319 * disappear and change, we need to protect against faulting
14320@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14321 unsigned char old[MCOUNT_INSN_SIZE], *new;
14322 int ret;
14323
14324- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14325+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14326 new = ftrace_call_replace(ip, (unsigned long)func);
14327 ret = ftrace_modify_code(ip, old, new);
14328
14329@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14330 switch (faulted) {
14331 case 0:
14332 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14333- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14334+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14335 break;
14336 case 1:
14337 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14338- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14339+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14340 break;
14341 case 2:
14342 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14343- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14344+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14345 break;
14346 }
14347
14348@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14349 {
14350 unsigned char code[MCOUNT_INSN_SIZE];
14351
14352+ ip = ktla_ktva(ip);
14353+
14354 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14355 return -EFAULT;
14356
14357diff -urNp linux-2.6.32.45/arch/x86/kernel/head32.c linux-2.6.32.45/arch/x86/kernel/head32.c
14358--- linux-2.6.32.45/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14359+++ linux-2.6.32.45/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14360@@ -16,6 +16,7 @@
14361 #include <asm/apic.h>
14362 #include <asm/io_apic.h>
14363 #include <asm/bios_ebda.h>
14364+#include <asm/boot.h>
14365
14366 static void __init i386_default_early_setup(void)
14367 {
14368@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14369 {
14370 reserve_trampoline_memory();
14371
14372- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14373+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14374
14375 #ifdef CONFIG_BLK_DEV_INITRD
14376 /* Reserve INITRD */
14377diff -urNp linux-2.6.32.45/arch/x86/kernel/head_32.S linux-2.6.32.45/arch/x86/kernel/head_32.S
14378--- linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14379+++ linux-2.6.32.45/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14380@@ -19,10 +19,17 @@
14381 #include <asm/setup.h>
14382 #include <asm/processor-flags.h>
14383 #include <asm/percpu.h>
14384+#include <asm/msr-index.h>
14385
14386 /* Physical address */
14387 #define pa(X) ((X) - __PAGE_OFFSET)
14388
14389+#ifdef CONFIG_PAX_KERNEXEC
14390+#define ta(X) (X)
14391+#else
14392+#define ta(X) ((X) - __PAGE_OFFSET)
14393+#endif
14394+
14395 /*
14396 * References to members of the new_cpu_data structure.
14397 */
14398@@ -52,11 +59,7 @@
14399 * and small than max_low_pfn, otherwise will waste some page table entries
14400 */
14401
14402-#if PTRS_PER_PMD > 1
14403-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14404-#else
14405-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14406-#endif
14407+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14408
14409 /* Enough space to fit pagetables for the low memory linear map */
14410 MAPPING_BEYOND_END = \
14411@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14412 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14413
14414 /*
14415+ * Real beginning of normal "text" segment
14416+ */
14417+ENTRY(stext)
14418+ENTRY(_stext)
14419+
14420+/*
14421 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14422 * %esi points to the real-mode code as a 32-bit pointer.
14423 * CS and DS must be 4 GB flat segments, but we don't depend on
14424@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14425 * can.
14426 */
14427 __HEAD
14428+
14429+#ifdef CONFIG_PAX_KERNEXEC
14430+ jmp startup_32
14431+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14432+.fill PAGE_SIZE-5,1,0xcc
14433+#endif
14434+
14435 ENTRY(startup_32)
14436+ movl pa(stack_start),%ecx
14437+
14438 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14439 us to not reload segments */
14440 testb $(1<<6), BP_loadflags(%esi)
14441@@ -95,7 +113,60 @@ ENTRY(startup_32)
14442 movl %eax,%es
14443 movl %eax,%fs
14444 movl %eax,%gs
14445+ movl %eax,%ss
14446 2:
14447+ leal -__PAGE_OFFSET(%ecx),%esp
14448+
14449+#ifdef CONFIG_SMP
14450+ movl $pa(cpu_gdt_table),%edi
14451+ movl $__per_cpu_load,%eax
14452+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14453+ rorl $16,%eax
14454+ movb %al,__KERNEL_PERCPU + 4(%edi)
14455+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14456+ movl $__per_cpu_end - 1,%eax
14457+ subl $__per_cpu_start,%eax
14458+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14459+#endif
14460+
14461+#ifdef CONFIG_PAX_MEMORY_UDEREF
14462+ movl $NR_CPUS,%ecx
14463+ movl $pa(cpu_gdt_table),%edi
14464+1:
14465+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14466+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14467+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14468+ addl $PAGE_SIZE_asm,%edi
14469+ loop 1b
14470+#endif
14471+
14472+#ifdef CONFIG_PAX_KERNEXEC
14473+ movl $pa(boot_gdt),%edi
14474+ movl $__LOAD_PHYSICAL_ADDR,%eax
14475+ movw %ax,__BOOT_CS + 2(%edi)
14476+ rorl $16,%eax
14477+ movb %al,__BOOT_CS + 4(%edi)
14478+ movb %ah,__BOOT_CS + 7(%edi)
14479+ rorl $16,%eax
14480+
14481+ ljmp $(__BOOT_CS),$1f
14482+1:
14483+
14484+ movl $NR_CPUS,%ecx
14485+ movl $pa(cpu_gdt_table),%edi
14486+ addl $__PAGE_OFFSET,%eax
14487+1:
14488+ movw %ax,__KERNEL_CS + 2(%edi)
14489+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14490+ rorl $16,%eax
14491+ movb %al,__KERNEL_CS + 4(%edi)
14492+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14493+ movb %ah,__KERNEL_CS + 7(%edi)
14494+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14495+ rorl $16,%eax
14496+ addl $PAGE_SIZE_asm,%edi
14497+ loop 1b
14498+#endif
14499
14500 /*
14501 * Clear BSS first so that there are no surprises...
14502@@ -140,9 +211,7 @@ ENTRY(startup_32)
14503 cmpl $num_subarch_entries, %eax
14504 jae bad_subarch
14505
14506- movl pa(subarch_entries)(,%eax,4), %eax
14507- subl $__PAGE_OFFSET, %eax
14508- jmp *%eax
14509+ jmp *pa(subarch_entries)(,%eax,4)
14510
14511 bad_subarch:
14512 WEAK(lguest_entry)
14513@@ -154,10 +223,10 @@ WEAK(xen_entry)
14514 __INITDATA
14515
14516 subarch_entries:
14517- .long default_entry /* normal x86/PC */
14518- .long lguest_entry /* lguest hypervisor */
14519- .long xen_entry /* Xen hypervisor */
14520- .long default_entry /* Moorestown MID */
14521+ .long ta(default_entry) /* normal x86/PC */
14522+ .long ta(lguest_entry) /* lguest hypervisor */
14523+ .long ta(xen_entry) /* Xen hypervisor */
14524+ .long ta(default_entry) /* Moorestown MID */
14525 num_subarch_entries = (. - subarch_entries) / 4
14526 .previous
14527 #endif /* CONFIG_PARAVIRT */
14528@@ -218,8 +287,11 @@ default_entry:
14529 movl %eax, pa(max_pfn_mapped)
14530
14531 /* Do early initialization of the fixmap area */
14532- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14533- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14534+#ifdef CONFIG_COMPAT_VDSO
14535+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14536+#else
14537+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14538+#endif
14539 #else /* Not PAE */
14540
14541 page_pde_offset = (__PAGE_OFFSET >> 20);
14542@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14543 movl %eax, pa(max_pfn_mapped)
14544
14545 /* Do early initialization of the fixmap area */
14546- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14547- movl %eax,pa(swapper_pg_dir+0xffc)
14548+#ifdef CONFIG_COMPAT_VDSO
14549+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14550+#else
14551+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14552+#endif
14553 #endif
14554 jmp 3f
14555 /*
14556@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14557 movl %eax,%es
14558 movl %eax,%fs
14559 movl %eax,%gs
14560+ movl pa(stack_start),%ecx
14561+ movl %eax,%ss
14562+ leal -__PAGE_OFFSET(%ecx),%esp
14563 #endif /* CONFIG_SMP */
14564 3:
14565
14566@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14567 orl %edx,%eax
14568 movl %eax,%cr4
14569
14570+#ifdef CONFIG_X86_PAE
14571 btl $5, %eax # check if PAE is enabled
14572 jnc 6f
14573
14574@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14575 cpuid
14576 cmpl $0x80000000, %eax
14577 jbe 6f
14578+
14579+ /* Clear bogus XD_DISABLE bits */
14580+ call verify_cpu
14581+
14582 mov $0x80000001, %eax
14583 cpuid
14584 /* Execute Disable bit supported? */
14585@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14586 jnc 6f
14587
14588 /* Setup EFER (Extended Feature Enable Register) */
14589- movl $0xc0000080, %ecx
14590+ movl $MSR_EFER, %ecx
14591 rdmsr
14592
14593 btsl $11, %eax
14594 /* Make changes effective */
14595 wrmsr
14596
14597+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14598+ movl $1,pa(nx_enabled)
14599+#endif
14600+
14601 6:
14602
14603 /*
14604@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14605 movl %eax,%cr0 /* ..and set paging (PG) bit */
14606 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14607 1:
14608- /* Set up the stack pointer */
14609- lss stack_start,%esp
14610+ /* Shift the stack pointer to a virtual address */
14611+ addl $__PAGE_OFFSET, %esp
14612
14613 /*
14614 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14615@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14616
14617 #ifdef CONFIG_SMP
14618 cmpb $0, ready
14619- jz 1f /* Initial CPU cleans BSS */
14620- jmp checkCPUtype
14621-1:
14622+ jnz checkCPUtype
14623 #endif /* CONFIG_SMP */
14624
14625 /*
14626@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14627 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14628 movl %eax,%ss # after changing gdt.
14629
14630- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14631+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14632 movl %eax,%ds
14633 movl %eax,%es
14634
14635@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14636 */
14637 cmpb $0,ready
14638 jne 1f
14639- movl $per_cpu__gdt_page,%eax
14640+ movl $cpu_gdt_table,%eax
14641 movl $per_cpu__stack_canary,%ecx
14642+#ifdef CONFIG_SMP
14643+ addl $__per_cpu_load,%ecx
14644+#endif
14645 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14646 shrl $16, %ecx
14647 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14648 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14649 1:
14650-#endif
14651 movl $(__KERNEL_STACK_CANARY),%eax
14652+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14653+ movl $(__USER_DS),%eax
14654+#else
14655+ xorl %eax,%eax
14656+#endif
14657 movl %eax,%gs
14658
14659 xorl %eax,%eax # Clear LDT
14660@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14661
14662 cld # gcc2 wants the direction flag cleared at all times
14663 pushl $0 # fake return address for unwinder
14664-#ifdef CONFIG_SMP
14665- movb ready, %cl
14666 movb $1, ready
14667- cmpb $0,%cl # the first CPU calls start_kernel
14668- je 1f
14669- movl (stack_start), %esp
14670-1:
14671-#endif /* CONFIG_SMP */
14672 jmp *(initial_code)
14673
14674 /*
14675@@ -546,22 +631,22 @@ early_page_fault:
14676 jmp early_fault
14677
14678 early_fault:
14679- cld
14680 #ifdef CONFIG_PRINTK
14681+ cmpl $1,%ss:early_recursion_flag
14682+ je hlt_loop
14683+ incl %ss:early_recursion_flag
14684+ cld
14685 pusha
14686 movl $(__KERNEL_DS),%eax
14687 movl %eax,%ds
14688 movl %eax,%es
14689- cmpl $2,early_recursion_flag
14690- je hlt_loop
14691- incl early_recursion_flag
14692 movl %cr2,%eax
14693 pushl %eax
14694 pushl %edx /* trapno */
14695 pushl $fault_msg
14696 call printk
14697+; call dump_stack
14698 #endif
14699- call dump_stack
14700 hlt_loop:
14701 hlt
14702 jmp hlt_loop
14703@@ -569,8 +654,11 @@ hlt_loop:
14704 /* This is the default interrupt "handler" :-) */
14705 ALIGN
14706 ignore_int:
14707- cld
14708 #ifdef CONFIG_PRINTK
14709+ cmpl $2,%ss:early_recursion_flag
14710+ je hlt_loop
14711+ incl %ss:early_recursion_flag
14712+ cld
14713 pushl %eax
14714 pushl %ecx
14715 pushl %edx
14716@@ -579,9 +667,6 @@ ignore_int:
14717 movl $(__KERNEL_DS),%eax
14718 movl %eax,%ds
14719 movl %eax,%es
14720- cmpl $2,early_recursion_flag
14721- je hlt_loop
14722- incl early_recursion_flag
14723 pushl 16(%esp)
14724 pushl 24(%esp)
14725 pushl 32(%esp)
14726@@ -600,6 +685,8 @@ ignore_int:
14727 #endif
14728 iret
14729
14730+#include "verify_cpu.S"
14731+
14732 __REFDATA
14733 .align 4
14734 ENTRY(initial_code)
14735@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14736 /*
14737 * BSS section
14738 */
14739-__PAGE_ALIGNED_BSS
14740- .align PAGE_SIZE_asm
14741 #ifdef CONFIG_X86_PAE
14742+.section .swapper_pg_pmd,"a",@progbits
14743 swapper_pg_pmd:
14744 .fill 1024*KPMDS,4,0
14745 #else
14746+.section .swapper_pg_dir,"a",@progbits
14747 ENTRY(swapper_pg_dir)
14748 .fill 1024,4,0
14749 #endif
14750+.section .swapper_pg_fixmap,"a",@progbits
14751 swapper_pg_fixmap:
14752 .fill 1024,4,0
14753 #ifdef CONFIG_X86_TRAMPOLINE
14754+.section .trampoline_pg_dir,"a",@progbits
14755 ENTRY(trampoline_pg_dir)
14756+#ifdef CONFIG_X86_PAE
14757+ .fill 4,8,0
14758+#else
14759 .fill 1024,4,0
14760 #endif
14761+#endif
14762+
14763+.section .empty_zero_page,"a",@progbits
14764 ENTRY(empty_zero_page)
14765 .fill 4096,1,0
14766
14767 /*
14768+ * The IDT has to be page-aligned to simplify the Pentium
14769+ * F0 0F bug workaround.. We have a special link segment
14770+ * for this.
14771+ */
14772+.section .idt,"a",@progbits
14773+ENTRY(idt_table)
14774+ .fill 256,8,0
14775+
14776+/*
14777 * This starts the data section.
14778 */
14779 #ifdef CONFIG_X86_PAE
14780-__PAGE_ALIGNED_DATA
14781- /* Page-aligned for the benefit of paravirt? */
14782- .align PAGE_SIZE_asm
14783+.section .swapper_pg_dir,"a",@progbits
14784+
14785 ENTRY(swapper_pg_dir)
14786 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14787 # if KPMDS == 3
14788@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14789 # error "Kernel PMDs should be 1, 2 or 3"
14790 # endif
14791 .align PAGE_SIZE_asm /* needs to be page-sized too */
14792+
14793+#ifdef CONFIG_PAX_PER_CPU_PGD
14794+ENTRY(cpu_pgd)
14795+ .rept NR_CPUS
14796+ .fill 4,8,0
14797+ .endr
14798+#endif
14799+
14800 #endif
14801
14802 .data
14803+.balign 4
14804 ENTRY(stack_start)
14805- .long init_thread_union+THREAD_SIZE
14806- .long __BOOT_DS
14807+ .long init_thread_union+THREAD_SIZE-8
14808
14809 ready: .byte 0
14810
14811+.section .rodata,"a",@progbits
14812 early_recursion_flag:
14813 .long 0
14814
14815@@ -697,7 +809,7 @@ fault_msg:
14816 .word 0 # 32 bit align gdt_desc.address
14817 boot_gdt_descr:
14818 .word __BOOT_DS+7
14819- .long boot_gdt - __PAGE_OFFSET
14820+ .long pa(boot_gdt)
14821
14822 .word 0 # 32-bit align idt_desc.address
14823 idt_descr:
14824@@ -708,7 +820,7 @@ idt_descr:
14825 .word 0 # 32 bit align gdt_desc.address
14826 ENTRY(early_gdt_descr)
14827 .word GDT_ENTRIES*8-1
14828- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14829+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14830
14831 /*
14832 * The boot_gdt must mirror the equivalent in setup.S and is
14833@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14834 .align L1_CACHE_BYTES
14835 ENTRY(boot_gdt)
14836 .fill GDT_ENTRY_BOOT_CS,8,0
14837- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14838- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14839+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14840+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14841+
14842+ .align PAGE_SIZE_asm
14843+ENTRY(cpu_gdt_table)
14844+ .rept NR_CPUS
14845+ .quad 0x0000000000000000 /* NULL descriptor */
14846+ .quad 0x0000000000000000 /* 0x0b reserved */
14847+ .quad 0x0000000000000000 /* 0x13 reserved */
14848+ .quad 0x0000000000000000 /* 0x1b reserved */
14849+
14850+#ifdef CONFIG_PAX_KERNEXEC
14851+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14852+#else
14853+ .quad 0x0000000000000000 /* 0x20 unused */
14854+#endif
14855+
14856+ .quad 0x0000000000000000 /* 0x28 unused */
14857+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14858+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14859+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14860+ .quad 0x0000000000000000 /* 0x4b reserved */
14861+ .quad 0x0000000000000000 /* 0x53 reserved */
14862+ .quad 0x0000000000000000 /* 0x5b reserved */
14863+
14864+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14865+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14866+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14867+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14868+
14869+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14870+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14871+
14872+ /*
14873+ * Segments used for calling PnP BIOS have byte granularity.
14874+ * The code segments and data segments have fixed 64k limits,
14875+ * the transfer segment sizes are set at run time.
14876+ */
14877+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14878+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14879+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14880+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14881+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14882+
14883+ /*
14884+ * The APM segments have byte granularity and their bases
14885+ * are set at run time. All have 64k limits.
14886+ */
14887+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14888+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14889+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14890+
14891+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14892+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14893+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14894+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14895+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14896+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14897+
14898+ /* Be sure this is zeroed to avoid false validations in Xen */
14899+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14900+ .endr
14901diff -urNp linux-2.6.32.45/arch/x86/kernel/head_64.S linux-2.6.32.45/arch/x86/kernel/head_64.S
14902--- linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14903+++ linux-2.6.32.45/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14904@@ -19,6 +19,7 @@
14905 #include <asm/cache.h>
14906 #include <asm/processor-flags.h>
14907 #include <asm/percpu.h>
14908+#include <asm/cpufeature.h>
14909
14910 #ifdef CONFIG_PARAVIRT
14911 #include <asm/asm-offsets.h>
14912@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14913 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14914 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14915 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14916+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14917+L3_VMALLOC_START = pud_index(VMALLOC_START)
14918+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14919+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14920
14921 .text
14922 __HEAD
14923@@ -85,35 +90,22 @@ startup_64:
14924 */
14925 addq %rbp, init_level4_pgt + 0(%rip)
14926 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14927+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14928+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14929 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14930
14931 addq %rbp, level3_ident_pgt + 0(%rip)
14932+#ifndef CONFIG_XEN
14933+ addq %rbp, level3_ident_pgt + 8(%rip)
14934+#endif
14935
14936- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14937- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14938+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14939
14940- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14941+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14942+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14943
14944- /* Add an Identity mapping if I am above 1G */
14945- leaq _text(%rip), %rdi
14946- andq $PMD_PAGE_MASK, %rdi
14947-
14948- movq %rdi, %rax
14949- shrq $PUD_SHIFT, %rax
14950- andq $(PTRS_PER_PUD - 1), %rax
14951- jz ident_complete
14952-
14953- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14954- leaq level3_ident_pgt(%rip), %rbx
14955- movq %rdx, 0(%rbx, %rax, 8)
14956-
14957- movq %rdi, %rax
14958- shrq $PMD_SHIFT, %rax
14959- andq $(PTRS_PER_PMD - 1), %rax
14960- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14961- leaq level2_spare_pgt(%rip), %rbx
14962- movq %rdx, 0(%rbx, %rax, 8)
14963-ident_complete:
14964+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14965+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14966
14967 /*
14968 * Fixup the kernel text+data virtual addresses. Note that
14969@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14970 * after the boot processor executes this code.
14971 */
14972
14973- /* Enable PAE mode and PGE */
14974- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14975+ /* Enable PAE mode and PSE/PGE */
14976+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14977 movq %rax, %cr4
14978
14979 /* Setup early boot stage 4 level pagetables. */
14980@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14981 movl $MSR_EFER, %ecx
14982 rdmsr
14983 btsl $_EFER_SCE, %eax /* Enable System Call */
14984- btl $20,%edi /* No Execute supported? */
14985+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14986 jnc 1f
14987 btsl $_EFER_NX, %eax
14988+ leaq init_level4_pgt(%rip), %rdi
14989+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14990+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14991+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14992 1: wrmsr /* Make changes effective */
14993
14994 /* Setup cr0 */
14995@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14996 .quad x86_64_start_kernel
14997 ENTRY(initial_gs)
14998 .quad INIT_PER_CPU_VAR(irq_stack_union)
14999- __FINITDATA
15000
15001 ENTRY(stack_start)
15002 .quad init_thread_union+THREAD_SIZE-8
15003 .word 0
15004+ __FINITDATA
15005
15006 bad_address:
15007 jmp bad_address
15008
15009- .section ".init.text","ax"
15010+ __INIT
15011 #ifdef CONFIG_EARLY_PRINTK
15012 .globl early_idt_handlers
15013 early_idt_handlers:
15014@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
15015 #endif /* EARLY_PRINTK */
15016 1: hlt
15017 jmp 1b
15018+ .previous
15019
15020 #ifdef CONFIG_EARLY_PRINTK
15021+ __INITDATA
15022 early_recursion_flag:
15023 .long 0
15024+ .previous
15025
15026+ .section .rodata,"a",@progbits
15027 early_idt_msg:
15028 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15029 early_idt_ripmsg:
15030 .asciz "RIP %s\n"
15031-#endif /* CONFIG_EARLY_PRINTK */
15032 .previous
15033+#endif /* CONFIG_EARLY_PRINTK */
15034
15035+ .section .rodata,"a",@progbits
15036 #define NEXT_PAGE(name) \
15037 .balign PAGE_SIZE; \
15038 ENTRY(name)
15039@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
15040 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15041 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15042 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15043+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15044+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15045+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15046+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15047 .org init_level4_pgt + L4_START_KERNEL*8, 0
15048 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15049 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15050
15051+#ifdef CONFIG_PAX_PER_CPU_PGD
15052+NEXT_PAGE(cpu_pgd)
15053+ .rept NR_CPUS
15054+ .fill 512,8,0
15055+ .endr
15056+#endif
15057+
15058 NEXT_PAGE(level3_ident_pgt)
15059 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15060+#ifdef CONFIG_XEN
15061 .fill 511,8,0
15062+#else
15063+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15064+ .fill 510,8,0
15065+#endif
15066+
15067+NEXT_PAGE(level3_vmalloc_pgt)
15068+ .fill 512,8,0
15069+
15070+NEXT_PAGE(level3_vmemmap_pgt)
15071+ .fill L3_VMEMMAP_START,8,0
15072+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15073
15074 NEXT_PAGE(level3_kernel_pgt)
15075 .fill L3_START_KERNEL,8,0
15076@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
15077 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15078 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15079
15080+NEXT_PAGE(level2_vmemmap_pgt)
15081+ .fill 512,8,0
15082+
15083 NEXT_PAGE(level2_fixmap_pgt)
15084- .fill 506,8,0
15085- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15086- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15087- .fill 5,8,0
15088+ .fill 507,8,0
15089+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15090+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15091+ .fill 4,8,0
15092
15093-NEXT_PAGE(level1_fixmap_pgt)
15094+NEXT_PAGE(level1_vsyscall_pgt)
15095 .fill 512,8,0
15096
15097-NEXT_PAGE(level2_ident_pgt)
15098- /* Since I easily can, map the first 1G.
15099+ /* Since I easily can, map the first 2G.
15100 * Don't set NX because code runs from these pages.
15101 */
15102- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15103+NEXT_PAGE(level2_ident_pgt)
15104+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15105
15106 NEXT_PAGE(level2_kernel_pgt)
15107 /*
15108@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
15109 * If you want to increase this then increase MODULES_VADDR
15110 * too.)
15111 */
15112- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15113- KERNEL_IMAGE_SIZE/PMD_SIZE)
15114-
15115-NEXT_PAGE(level2_spare_pgt)
15116- .fill 512, 8, 0
15117+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15118
15119 #undef PMDS
15120 #undef NEXT_PAGE
15121
15122- .data
15123+ .align PAGE_SIZE
15124+ENTRY(cpu_gdt_table)
15125+ .rept NR_CPUS
15126+ .quad 0x0000000000000000 /* NULL descriptor */
15127+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15128+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15129+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15130+ .quad 0x00cffb000000ffff /* __USER32_CS */
15131+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15132+ .quad 0x00affb000000ffff /* __USER_CS */
15133+
15134+#ifdef CONFIG_PAX_KERNEXEC
15135+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15136+#else
15137+ .quad 0x0 /* unused */
15138+#endif
15139+
15140+ .quad 0,0 /* TSS */
15141+ .quad 0,0 /* LDT */
15142+ .quad 0,0,0 /* three TLS descriptors */
15143+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15144+ /* asm/segment.h:GDT_ENTRIES must match this */
15145+
15146+ /* zero the remaining page */
15147+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15148+ .endr
15149+
15150 .align 16
15151 .globl early_gdt_descr
15152 early_gdt_descr:
15153 .word GDT_ENTRIES*8-1
15154 early_gdt_descr_base:
15155- .quad INIT_PER_CPU_VAR(gdt_page)
15156+ .quad cpu_gdt_table
15157
15158 ENTRY(phys_base)
15159 /* This must match the first entry in level2_kernel_pgt */
15160 .quad 0x0000000000000000
15161
15162 #include "../../x86/xen/xen-head.S"
15163-
15164- .section .bss, "aw", @nobits
15165+
15166+ .section .rodata,"a",@progbits
15167 .align L1_CACHE_BYTES
15168 ENTRY(idt_table)
15169- .skip IDT_ENTRIES * 16
15170+ .fill 512,8,0
15171
15172 __PAGE_ALIGNED_BSS
15173 .align PAGE_SIZE
15174diff -urNp linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c
15175--- linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15176+++ linux-2.6.32.45/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15177@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15178 EXPORT_SYMBOL(cmpxchg8b_emu);
15179 #endif
15180
15181+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15182+
15183 /* Networking helper routines. */
15184 EXPORT_SYMBOL(csum_partial_copy_generic);
15185+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15186+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15187
15188 EXPORT_SYMBOL(__get_user_1);
15189 EXPORT_SYMBOL(__get_user_2);
15190@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15191
15192 EXPORT_SYMBOL(csum_partial);
15193 EXPORT_SYMBOL(empty_zero_page);
15194+
15195+#ifdef CONFIG_PAX_KERNEXEC
15196+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15197+#endif
15198diff -urNp linux-2.6.32.45/arch/x86/kernel/i8259.c linux-2.6.32.45/arch/x86/kernel/i8259.c
15199--- linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15200+++ linux-2.6.32.45/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15201@@ -208,7 +208,7 @@ spurious_8259A_irq:
15202 "spurious 8259A interrupt: IRQ%d.\n", irq);
15203 spurious_irq_mask |= irqmask;
15204 }
15205- atomic_inc(&irq_err_count);
15206+ atomic_inc_unchecked(&irq_err_count);
15207 /*
15208 * Theoretically we do not have to handle this IRQ,
15209 * but in Linux this does not cause problems and is
15210diff -urNp linux-2.6.32.45/arch/x86/kernel/init_task.c linux-2.6.32.45/arch/x86/kernel/init_task.c
15211--- linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15212+++ linux-2.6.32.45/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15213@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15214 * way process stacks are handled. This is done by having a special
15215 * "init_task" linker map entry..
15216 */
15217-union thread_union init_thread_union __init_task_data =
15218- { INIT_THREAD_INFO(init_task) };
15219+union thread_union init_thread_union __init_task_data;
15220
15221 /*
15222 * Initial task structure.
15223@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15224 * section. Since TSS's are completely CPU-local, we want them
15225 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15226 */
15227-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15228-
15229+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15230+EXPORT_SYMBOL(init_tss);
15231diff -urNp linux-2.6.32.45/arch/x86/kernel/ioport.c linux-2.6.32.45/arch/x86/kernel/ioport.c
15232--- linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15233+++ linux-2.6.32.45/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15234@@ -6,6 +6,7 @@
15235 #include <linux/sched.h>
15236 #include <linux/kernel.h>
15237 #include <linux/capability.h>
15238+#include <linux/security.h>
15239 #include <linux/errno.h>
15240 #include <linux/types.h>
15241 #include <linux/ioport.h>
15242@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15243
15244 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15245 return -EINVAL;
15246+#ifdef CONFIG_GRKERNSEC_IO
15247+ if (turn_on && grsec_disable_privio) {
15248+ gr_handle_ioperm();
15249+ return -EPERM;
15250+ }
15251+#endif
15252 if (turn_on && !capable(CAP_SYS_RAWIO))
15253 return -EPERM;
15254
15255@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15256 * because the ->io_bitmap_max value must match the bitmap
15257 * contents:
15258 */
15259- tss = &per_cpu(init_tss, get_cpu());
15260+ tss = init_tss + get_cpu();
15261
15262 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15263
15264@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15265 return -EINVAL;
15266 /* Trying to gain more privileges? */
15267 if (level > old) {
15268+#ifdef CONFIG_GRKERNSEC_IO
15269+ if (grsec_disable_privio) {
15270+ gr_handle_iopl();
15271+ return -EPERM;
15272+ }
15273+#endif
15274 if (!capable(CAP_SYS_RAWIO))
15275 return -EPERM;
15276 }
15277diff -urNp linux-2.6.32.45/arch/x86/kernel/irq_32.c linux-2.6.32.45/arch/x86/kernel/irq_32.c
15278--- linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15279+++ linux-2.6.32.45/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15280@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15281 __asm__ __volatile__("andl %%esp,%0" :
15282 "=r" (sp) : "0" (THREAD_SIZE - 1));
15283
15284- return sp < (sizeof(struct thread_info) + STACK_WARN);
15285+ return sp < STACK_WARN;
15286 }
15287
15288 static void print_stack_overflow(void)
15289@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15290 * per-CPU IRQ handling contexts (thread information and stack)
15291 */
15292 union irq_ctx {
15293- struct thread_info tinfo;
15294- u32 stack[THREAD_SIZE/sizeof(u32)];
15295-} __attribute__((aligned(PAGE_SIZE)));
15296+ unsigned long previous_esp;
15297+ u32 stack[THREAD_SIZE/sizeof(u32)];
15298+} __attribute__((aligned(THREAD_SIZE)));
15299
15300 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15301 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15302@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15303 static inline int
15304 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15305 {
15306- union irq_ctx *curctx, *irqctx;
15307+ union irq_ctx *irqctx;
15308 u32 *isp, arg1, arg2;
15309
15310- curctx = (union irq_ctx *) current_thread_info();
15311 irqctx = __get_cpu_var(hardirq_ctx);
15312
15313 /*
15314@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15315 * handler) we can't do that and just have to keep using the
15316 * current stack (which is the irq stack already after all)
15317 */
15318- if (unlikely(curctx == irqctx))
15319+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15320 return 0;
15321
15322 /* build the stack frame on the IRQ stack */
15323- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15324- irqctx->tinfo.task = curctx->tinfo.task;
15325- irqctx->tinfo.previous_esp = current_stack_pointer;
15326+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15327+ irqctx->previous_esp = current_stack_pointer;
15328
15329- /*
15330- * Copy the softirq bits in preempt_count so that the
15331- * softirq checks work in the hardirq context.
15332- */
15333- irqctx->tinfo.preempt_count =
15334- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15335- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15336+#ifdef CONFIG_PAX_MEMORY_UDEREF
15337+ __set_fs(MAKE_MM_SEG(0));
15338+#endif
15339
15340 if (unlikely(overflow))
15341 call_on_stack(print_stack_overflow, isp);
15342@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15343 : "0" (irq), "1" (desc), "2" (isp),
15344 "D" (desc->handle_irq)
15345 : "memory", "cc", "ecx");
15346+
15347+#ifdef CONFIG_PAX_MEMORY_UDEREF
15348+ __set_fs(current_thread_info()->addr_limit);
15349+#endif
15350+
15351 return 1;
15352 }
15353
15354@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15355 */
15356 void __cpuinit irq_ctx_init(int cpu)
15357 {
15358- union irq_ctx *irqctx;
15359-
15360 if (per_cpu(hardirq_ctx, cpu))
15361 return;
15362
15363- irqctx = &per_cpu(hardirq_stack, cpu);
15364- irqctx->tinfo.task = NULL;
15365- irqctx->tinfo.exec_domain = NULL;
15366- irqctx->tinfo.cpu = cpu;
15367- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15368- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15369-
15370- per_cpu(hardirq_ctx, cpu) = irqctx;
15371-
15372- irqctx = &per_cpu(softirq_stack, cpu);
15373- irqctx->tinfo.task = NULL;
15374- irqctx->tinfo.exec_domain = NULL;
15375- irqctx->tinfo.cpu = cpu;
15376- irqctx->tinfo.preempt_count = 0;
15377- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15378-
15379- per_cpu(softirq_ctx, cpu) = irqctx;
15380+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15381+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15382
15383 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15384 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15385@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15386 asmlinkage void do_softirq(void)
15387 {
15388 unsigned long flags;
15389- struct thread_info *curctx;
15390 union irq_ctx *irqctx;
15391 u32 *isp;
15392
15393@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15394 local_irq_save(flags);
15395
15396 if (local_softirq_pending()) {
15397- curctx = current_thread_info();
15398 irqctx = __get_cpu_var(softirq_ctx);
15399- irqctx->tinfo.task = curctx->task;
15400- irqctx->tinfo.previous_esp = current_stack_pointer;
15401+ irqctx->previous_esp = current_stack_pointer;
15402
15403 /* build the stack frame on the softirq stack */
15404- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15405+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15406+
15407+#ifdef CONFIG_PAX_MEMORY_UDEREF
15408+ __set_fs(MAKE_MM_SEG(0));
15409+#endif
15410
15411 call_on_stack(__do_softirq, isp);
15412+
15413+#ifdef CONFIG_PAX_MEMORY_UDEREF
15414+ __set_fs(current_thread_info()->addr_limit);
15415+#endif
15416+
15417 /*
15418 * Shouldnt happen, we returned above if in_interrupt():
15419 */
15420diff -urNp linux-2.6.32.45/arch/x86/kernel/irq.c linux-2.6.32.45/arch/x86/kernel/irq.c
15421--- linux-2.6.32.45/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15422+++ linux-2.6.32.45/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15423@@ -15,7 +15,7 @@
15424 #include <asm/mce.h>
15425 #include <asm/hw_irq.h>
15426
15427-atomic_t irq_err_count;
15428+atomic_unchecked_t irq_err_count;
15429
15430 /* Function pointer for generic interrupt vector handling */
15431 void (*generic_interrupt_extension)(void) = NULL;
15432@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15433 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15434 seq_printf(p, " Machine check polls\n");
15435 #endif
15436- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15437+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15438 #if defined(CONFIG_X86_IO_APIC)
15439- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15440+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15441 #endif
15442 return 0;
15443 }
15444@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15445
15446 u64 arch_irq_stat(void)
15447 {
15448- u64 sum = atomic_read(&irq_err_count);
15449+ u64 sum = atomic_read_unchecked(&irq_err_count);
15450
15451 #ifdef CONFIG_X86_IO_APIC
15452- sum += atomic_read(&irq_mis_count);
15453+ sum += atomic_read_unchecked(&irq_mis_count);
15454 #endif
15455 return sum;
15456 }
15457diff -urNp linux-2.6.32.45/arch/x86/kernel/kgdb.c linux-2.6.32.45/arch/x86/kernel/kgdb.c
15458--- linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15459+++ linux-2.6.32.45/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15460@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15461
15462 /* clear the trace bit */
15463 linux_regs->flags &= ~X86_EFLAGS_TF;
15464- atomic_set(&kgdb_cpu_doing_single_step, -1);
15465+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15466
15467 /* set the trace bit if we're stepping */
15468 if (remcomInBuffer[0] == 's') {
15469 linux_regs->flags |= X86_EFLAGS_TF;
15470 kgdb_single_step = 1;
15471- atomic_set(&kgdb_cpu_doing_single_step,
15472+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15473 raw_smp_processor_id());
15474 }
15475
15476@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15477 break;
15478
15479 case DIE_DEBUG:
15480- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15481+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15482 raw_smp_processor_id()) {
15483 if (user_mode(regs))
15484 return single_step_cont(regs, args);
15485@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15486 return instruction_pointer(regs);
15487 }
15488
15489-struct kgdb_arch arch_kgdb_ops = {
15490+const struct kgdb_arch arch_kgdb_ops = {
15491 /* Breakpoint instruction: */
15492 .gdb_bpt_instr = { 0xcc },
15493 .flags = KGDB_HW_BREAKPOINT,
15494diff -urNp linux-2.6.32.45/arch/x86/kernel/kprobes.c linux-2.6.32.45/arch/x86/kernel/kprobes.c
15495--- linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15496+++ linux-2.6.32.45/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15497@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15498 char op;
15499 s32 raddr;
15500 } __attribute__((packed)) * jop;
15501- jop = (struct __arch_jmp_op *)from;
15502+
15503+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15504+
15505+ pax_open_kernel();
15506 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15507 jop->op = RELATIVEJUMP_INSTRUCTION;
15508+ pax_close_kernel();
15509 }
15510
15511 /*
15512@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15513 kprobe_opcode_t opcode;
15514 kprobe_opcode_t *orig_opcodes = opcodes;
15515
15516- if (search_exception_tables((unsigned long)opcodes))
15517+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15518 return 0; /* Page fault may occur on this address. */
15519
15520 retry:
15521@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15522 disp = (u8 *) p->addr + *((s32 *) insn) -
15523 (u8 *) p->ainsn.insn;
15524 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15525+ pax_open_kernel();
15526 *(s32 *)insn = (s32) disp;
15527+ pax_close_kernel();
15528 }
15529 }
15530 #endif
15531@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15532
15533 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15534 {
15535- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15536+ pax_open_kernel();
15537+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15538+ pax_close_kernel();
15539
15540 fix_riprel(p);
15541
15542- if (can_boost(p->addr))
15543+ if (can_boost(ktla_ktva(p->addr)))
15544 p->ainsn.boostable = 0;
15545 else
15546 p->ainsn.boostable = -1;
15547
15548- p->opcode = *p->addr;
15549+ p->opcode = *(ktla_ktva(p->addr));
15550 }
15551
15552 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15553@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15554 if (p->opcode == BREAKPOINT_INSTRUCTION)
15555 regs->ip = (unsigned long)p->addr;
15556 else
15557- regs->ip = (unsigned long)p->ainsn.insn;
15558+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15559 }
15560
15561 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15562@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15563 if (p->ainsn.boostable == 1 && !p->post_handler) {
15564 /* Boost up -- we can execute copied instructions directly */
15565 reset_current_kprobe();
15566- regs->ip = (unsigned long)p->ainsn.insn;
15567+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15568 preempt_enable_no_resched();
15569 return;
15570 }
15571@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15572 struct kprobe_ctlblk *kcb;
15573
15574 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15575- if (*addr != BREAKPOINT_INSTRUCTION) {
15576+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15577 /*
15578 * The breakpoint instruction was removed right
15579 * after we hit it. Another cpu has removed
15580@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15581 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15582 {
15583 unsigned long *tos = stack_addr(regs);
15584- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15585+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15586 unsigned long orig_ip = (unsigned long)p->addr;
15587 kprobe_opcode_t *insn = p->ainsn.insn;
15588
15589@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15590 struct die_args *args = data;
15591 int ret = NOTIFY_DONE;
15592
15593- if (args->regs && user_mode_vm(args->regs))
15594+ if (args->regs && user_mode(args->regs))
15595 return ret;
15596
15597 switch (val) {
15598diff -urNp linux-2.6.32.45/arch/x86/kernel/ldt.c linux-2.6.32.45/arch/x86/kernel/ldt.c
15599--- linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15600+++ linux-2.6.32.45/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15601@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15602 if (reload) {
15603 #ifdef CONFIG_SMP
15604 preempt_disable();
15605- load_LDT(pc);
15606+ load_LDT_nolock(pc);
15607 if (!cpumask_equal(mm_cpumask(current->mm),
15608 cpumask_of(smp_processor_id())))
15609 smp_call_function(flush_ldt, current->mm, 1);
15610 preempt_enable();
15611 #else
15612- load_LDT(pc);
15613+ load_LDT_nolock(pc);
15614 #endif
15615 }
15616 if (oldsize) {
15617@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15618 return err;
15619
15620 for (i = 0; i < old->size; i++)
15621- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15622+ write_ldt_entry(new->ldt, i, old->ldt + i);
15623 return 0;
15624 }
15625
15626@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15627 retval = copy_ldt(&mm->context, &old_mm->context);
15628 mutex_unlock(&old_mm->context.lock);
15629 }
15630+
15631+ if (tsk == current) {
15632+ mm->context.vdso = 0;
15633+
15634+#ifdef CONFIG_X86_32
15635+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15636+ mm->context.user_cs_base = 0UL;
15637+ mm->context.user_cs_limit = ~0UL;
15638+
15639+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15640+ cpus_clear(mm->context.cpu_user_cs_mask);
15641+#endif
15642+
15643+#endif
15644+#endif
15645+
15646+ }
15647+
15648 return retval;
15649 }
15650
15651@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15652 }
15653 }
15654
15655+#ifdef CONFIG_PAX_SEGMEXEC
15656+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15657+ error = -EINVAL;
15658+ goto out_unlock;
15659+ }
15660+#endif
15661+
15662 fill_ldt(&ldt, &ldt_info);
15663 if (oldmode)
15664 ldt.avl = 0;
15665diff -urNp linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c
15666--- linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15667+++ linux-2.6.32.45/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15668@@ -26,7 +26,7 @@
15669 #include <asm/system.h>
15670 #include <asm/cacheflush.h>
15671
15672-static void set_idt(void *newidt, __u16 limit)
15673+static void set_idt(struct desc_struct *newidt, __u16 limit)
15674 {
15675 struct desc_ptr curidt;
15676
15677@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15678 }
15679
15680
15681-static void set_gdt(void *newgdt, __u16 limit)
15682+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15683 {
15684 struct desc_ptr curgdt;
15685
15686@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15687 }
15688
15689 control_page = page_address(image->control_code_page);
15690- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15691+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15692
15693 relocate_kernel_ptr = control_page;
15694 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15695diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_amd.c linux-2.6.32.45/arch/x86/kernel/microcode_amd.c
15696--- linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15697+++ linux-2.6.32.45/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15698@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15699 uci->mc = NULL;
15700 }
15701
15702-static struct microcode_ops microcode_amd_ops = {
15703+static const struct microcode_ops microcode_amd_ops = {
15704 .request_microcode_user = request_microcode_user,
15705 .request_microcode_fw = request_microcode_fw,
15706 .collect_cpu_info = collect_cpu_info_amd,
15707@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15708 .microcode_fini_cpu = microcode_fini_cpu_amd,
15709 };
15710
15711-struct microcode_ops * __init init_amd_microcode(void)
15712+const struct microcode_ops * __init init_amd_microcode(void)
15713 {
15714 return &microcode_amd_ops;
15715 }
15716diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_core.c linux-2.6.32.45/arch/x86/kernel/microcode_core.c
15717--- linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15718+++ linux-2.6.32.45/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15719@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15720
15721 #define MICROCODE_VERSION "2.00"
15722
15723-static struct microcode_ops *microcode_ops;
15724+static const struct microcode_ops *microcode_ops;
15725
15726 /*
15727 * Synchronization.
15728diff -urNp linux-2.6.32.45/arch/x86/kernel/microcode_intel.c linux-2.6.32.45/arch/x86/kernel/microcode_intel.c
15729--- linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15730+++ linux-2.6.32.45/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15731@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15732
15733 static int get_ucode_user(void *to, const void *from, size_t n)
15734 {
15735- return copy_from_user(to, from, n);
15736+ return copy_from_user(to, (__force const void __user *)from, n);
15737 }
15738
15739 static enum ucode_state
15740 request_microcode_user(int cpu, const void __user *buf, size_t size)
15741 {
15742- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15743+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15744 }
15745
15746 static void microcode_fini_cpu(int cpu)
15747@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15748 uci->mc = NULL;
15749 }
15750
15751-static struct microcode_ops microcode_intel_ops = {
15752+static const struct microcode_ops microcode_intel_ops = {
15753 .request_microcode_user = request_microcode_user,
15754 .request_microcode_fw = request_microcode_fw,
15755 .collect_cpu_info = collect_cpu_info,
15756@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15757 .microcode_fini_cpu = microcode_fini_cpu,
15758 };
15759
15760-struct microcode_ops * __init init_intel_microcode(void)
15761+const struct microcode_ops * __init init_intel_microcode(void)
15762 {
15763 return &microcode_intel_ops;
15764 }
15765diff -urNp linux-2.6.32.45/arch/x86/kernel/module.c linux-2.6.32.45/arch/x86/kernel/module.c
15766--- linux-2.6.32.45/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15767+++ linux-2.6.32.45/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15768@@ -34,7 +34,7 @@
15769 #define DEBUGP(fmt...)
15770 #endif
15771
15772-void *module_alloc(unsigned long size)
15773+static void *__module_alloc(unsigned long size, pgprot_t prot)
15774 {
15775 struct vm_struct *area;
15776
15777@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15778 if (!area)
15779 return NULL;
15780
15781- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15782- PAGE_KERNEL_EXEC);
15783+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15784+}
15785+
15786+void *module_alloc(unsigned long size)
15787+{
15788+
15789+#ifdef CONFIG_PAX_KERNEXEC
15790+ return __module_alloc(size, PAGE_KERNEL);
15791+#else
15792+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15793+#endif
15794+
15795 }
15796
15797 /* Free memory returned from module_alloc */
15798@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15799 vfree(module_region);
15800 }
15801
15802+#ifdef CONFIG_PAX_KERNEXEC
15803+#ifdef CONFIG_X86_32
15804+void *module_alloc_exec(unsigned long size)
15805+{
15806+ struct vm_struct *area;
15807+
15808+ if (size == 0)
15809+ return NULL;
15810+
15811+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15812+ return area ? area->addr : NULL;
15813+}
15814+EXPORT_SYMBOL(module_alloc_exec);
15815+
15816+void module_free_exec(struct module *mod, void *module_region)
15817+{
15818+ vunmap(module_region);
15819+}
15820+EXPORT_SYMBOL(module_free_exec);
15821+#else
15822+void module_free_exec(struct module *mod, void *module_region)
15823+{
15824+ module_free(mod, module_region);
15825+}
15826+EXPORT_SYMBOL(module_free_exec);
15827+
15828+void *module_alloc_exec(unsigned long size)
15829+{
15830+ return __module_alloc(size, PAGE_KERNEL_RX);
15831+}
15832+EXPORT_SYMBOL(module_alloc_exec);
15833+#endif
15834+#endif
15835+
15836 /* We don't need anything special. */
15837 int module_frob_arch_sections(Elf_Ehdr *hdr,
15838 Elf_Shdr *sechdrs,
15839@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15840 unsigned int i;
15841 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15842 Elf32_Sym *sym;
15843- uint32_t *location;
15844+ uint32_t *plocation, location;
15845
15846 DEBUGP("Applying relocate section %u to %u\n", relsec,
15847 sechdrs[relsec].sh_info);
15848 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15849 /* This is where to make the change */
15850- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15851- + rel[i].r_offset;
15852+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15853+ location = (uint32_t)plocation;
15854+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15855+ plocation = ktla_ktva((void *)plocation);
15856 /* This is the symbol it is referring to. Note that all
15857 undefined symbols have been resolved. */
15858 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15859@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15860 switch (ELF32_R_TYPE(rel[i].r_info)) {
15861 case R_386_32:
15862 /* We add the value into the location given */
15863- *location += sym->st_value;
15864+ pax_open_kernel();
15865+ *plocation += sym->st_value;
15866+ pax_close_kernel();
15867 break;
15868 case R_386_PC32:
15869 /* Add the value, subtract its postition */
15870- *location += sym->st_value - (uint32_t)location;
15871+ pax_open_kernel();
15872+ *plocation += sym->st_value - location;
15873+ pax_close_kernel();
15874 break;
15875 default:
15876 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15877@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15878 case R_X86_64_NONE:
15879 break;
15880 case R_X86_64_64:
15881+ pax_open_kernel();
15882 *(u64 *)loc = val;
15883+ pax_close_kernel();
15884 break;
15885 case R_X86_64_32:
15886+ pax_open_kernel();
15887 *(u32 *)loc = val;
15888+ pax_close_kernel();
15889 if (val != *(u32 *)loc)
15890 goto overflow;
15891 break;
15892 case R_X86_64_32S:
15893+ pax_open_kernel();
15894 *(s32 *)loc = val;
15895+ pax_close_kernel();
15896 if ((s64)val != *(s32 *)loc)
15897 goto overflow;
15898 break;
15899 case R_X86_64_PC32:
15900 val -= (u64)loc;
15901+ pax_open_kernel();
15902 *(u32 *)loc = val;
15903+ pax_close_kernel();
15904+
15905 #if 0
15906 if ((s64)val != *(s32 *)loc)
15907 goto overflow;
15908diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt.c linux-2.6.32.45/arch/x86/kernel/paravirt.c
15909--- linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15910+++ linux-2.6.32.45/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15911@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15912 {
15913 return x;
15914 }
15915+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15916+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15917+#endif
15918
15919 void __init default_banner(void)
15920 {
15921@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15922 * corresponding structure. */
15923 static void *get_call_destination(u8 type)
15924 {
15925- struct paravirt_patch_template tmpl = {
15926+ const struct paravirt_patch_template tmpl = {
15927 .pv_init_ops = pv_init_ops,
15928 .pv_time_ops = pv_time_ops,
15929 .pv_cpu_ops = pv_cpu_ops,
15930@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15931 .pv_lock_ops = pv_lock_ops,
15932 #endif
15933 };
15934+
15935+ pax_track_stack();
15936 return *((void **)&tmpl + type);
15937 }
15938
15939@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15940 if (opfunc == NULL)
15941 /* If there's no function, patch it with a ud2a (BUG) */
15942 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15943- else if (opfunc == _paravirt_nop)
15944+ else if (opfunc == (void *)_paravirt_nop)
15945 /* If the operation is a nop, then nop the callsite */
15946 ret = paravirt_patch_nop();
15947
15948 /* identity functions just return their single argument */
15949- else if (opfunc == _paravirt_ident_32)
15950+ else if (opfunc == (void *)_paravirt_ident_32)
15951 ret = paravirt_patch_ident_32(insnbuf, len);
15952- else if (opfunc == _paravirt_ident_64)
15953+ else if (opfunc == (void *)_paravirt_ident_64)
15954+ ret = paravirt_patch_ident_64(insnbuf, len);
15955+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15956+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15957 ret = paravirt_patch_ident_64(insnbuf, len);
15958+#endif
15959
15960 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15961 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15962@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15963 if (insn_len > len || start == NULL)
15964 insn_len = len;
15965 else
15966- memcpy(insnbuf, start, insn_len);
15967+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15968
15969 return insn_len;
15970 }
15971@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15972 preempt_enable();
15973 }
15974
15975-struct pv_info pv_info = {
15976+struct pv_info pv_info __read_only = {
15977 .name = "bare hardware",
15978 .paravirt_enabled = 0,
15979 .kernel_rpl = 0,
15980 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15981 };
15982
15983-struct pv_init_ops pv_init_ops = {
15984+struct pv_init_ops pv_init_ops __read_only = {
15985 .patch = native_patch,
15986 };
15987
15988-struct pv_time_ops pv_time_ops = {
15989+struct pv_time_ops pv_time_ops __read_only = {
15990 .sched_clock = native_sched_clock,
15991 };
15992
15993-struct pv_irq_ops pv_irq_ops = {
15994+struct pv_irq_ops pv_irq_ops __read_only = {
15995 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15996 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15997 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15998@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15999 #endif
16000 };
16001
16002-struct pv_cpu_ops pv_cpu_ops = {
16003+struct pv_cpu_ops pv_cpu_ops __read_only = {
16004 .cpuid = native_cpuid,
16005 .get_debugreg = native_get_debugreg,
16006 .set_debugreg = native_set_debugreg,
16007@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16008 .end_context_switch = paravirt_nop,
16009 };
16010
16011-struct pv_apic_ops pv_apic_ops = {
16012+struct pv_apic_ops pv_apic_ops __read_only = {
16013 #ifdef CONFIG_X86_LOCAL_APIC
16014 .startup_ipi_hook = paravirt_nop,
16015 #endif
16016 };
16017
16018-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16019+#ifdef CONFIG_X86_32
16020+#ifdef CONFIG_X86_PAE
16021+/* 64-bit pagetable entries */
16022+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16023+#else
16024 /* 32-bit pagetable entries */
16025 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16026+#endif
16027 #else
16028 /* 64-bit pagetable entries */
16029 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16030 #endif
16031
16032-struct pv_mmu_ops pv_mmu_ops = {
16033+struct pv_mmu_ops pv_mmu_ops __read_only = {
16034
16035 .read_cr2 = native_read_cr2,
16036 .write_cr2 = native_write_cr2,
16037@@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16038 },
16039
16040 .set_fixmap = native_set_fixmap,
16041+
16042+#ifdef CONFIG_PAX_KERNEXEC
16043+ .pax_open_kernel = native_pax_open_kernel,
16044+ .pax_close_kernel = native_pax_close_kernel,
16045+#endif
16046+
16047 };
16048
16049 EXPORT_SYMBOL_GPL(pv_time_ops);
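The __read_only annotations above move the paravirt ops tables out of writable data so their function pointers cannot be retargeted after boot. A small sketch of the idea; the macro expansion below is an assumption for illustration (the real attribute and section name come from the PaX patch itself, which additionally write-protects that section at runtime):

#include <stdio.h>

#define __read_only __attribute__((__section__(".data.read_only")))

struct ops {
        int (*op)(int);
};

static int native_op(int x) { return x + 1; }

/* Written once at initialisation; in the hardened kernel the section this
 * lands in is mapped read-only afterwards, so the pointer cannot be
 * redirected by a runtime memory-corruption bug. */
static struct ops demo_ops __read_only = {
        .op = native_op,
};

int main(void)
{
        printf("%d\n", demo_ops.op(41));
        return 0;
}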
16050diff -urNp linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c
16051--- linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
16052+++ linux-2.6.32.45/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
16053@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
16054 __raw_spin_lock(lock);
16055 }
16056
16057-struct pv_lock_ops pv_lock_ops = {
16058+struct pv_lock_ops pv_lock_ops __read_only = {
16059 #ifdef CONFIG_SMP
16060 .spin_is_locked = __ticket_spin_is_locked,
16061 .spin_is_contended = __ticket_spin_is_contended,
16062diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c
16063--- linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
16064+++ linux-2.6.32.45/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
16065@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
16066 free_pages((unsigned long)vaddr, get_order(size));
16067 }
16068
16069-static struct dma_map_ops calgary_dma_ops = {
16070+static const struct dma_map_ops calgary_dma_ops = {
16071 .alloc_coherent = calgary_alloc_coherent,
16072 .free_coherent = calgary_free_coherent,
16073 .map_sg = calgary_map_sg,
16074diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-dma.c linux-2.6.32.45/arch/x86/kernel/pci-dma.c
16075--- linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
16076+++ linux-2.6.32.45/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
16077@@ -14,7 +14,7 @@
16078
16079 static int forbid_dac __read_mostly;
16080
16081-struct dma_map_ops *dma_ops;
16082+const struct dma_map_ops *dma_ops;
16083 EXPORT_SYMBOL(dma_ops);
16084
16085 static int iommu_sac_force __read_mostly;
16086@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
16087
16088 int dma_supported(struct device *dev, u64 mask)
16089 {
16090- struct dma_map_ops *ops = get_dma_ops(dev);
16091+ const struct dma_map_ops *ops = get_dma_ops(dev);
16092
16093 #ifdef CONFIG_PCI
16094 if (mask > 0xffffffff && forbid_dac > 0) {
16095diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c
16096--- linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
16097+++ linux-2.6.32.45/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
16098@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
16099 return -1;
16100 }
16101
16102-static struct dma_map_ops gart_dma_ops = {
16103+static const struct dma_map_ops gart_dma_ops = {
16104 .map_sg = gart_map_sg,
16105 .unmap_sg = gart_unmap_sg,
16106 .map_page = gart_map_page,
16107diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-nommu.c linux-2.6.32.45/arch/x86/kernel/pci-nommu.c
16108--- linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
16109+++ linux-2.6.32.45/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
16110@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
16111 flush_write_buffers();
16112 }
16113
16114-struct dma_map_ops nommu_dma_ops = {
16115+const struct dma_map_ops nommu_dma_ops = {
16116 .alloc_coherent = dma_generic_alloc_coherent,
16117 .free_coherent = nommu_free_coherent,
16118 .map_sg = nommu_map_sg,
16119diff -urNp linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c
16120--- linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
16121+++ linux-2.6.32.45/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
16122@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
16123 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
16124 }
16125
16126-static struct dma_map_ops swiotlb_dma_ops = {
16127+static const struct dma_map_ops swiotlb_dma_ops = {
16128 .mapping_error = swiotlb_dma_mapping_error,
16129 .alloc_coherent = x86_swiotlb_alloc_coherent,
16130 .free_coherent = swiotlb_free_coherent,
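The dma_map_ops changes in this and the neighbouring files are plain C constification: the method tables become const objects emitted into .rodata and dma_ops becomes a pointer-to-const, so a stray store to a callback pointer is rejected at compile time. Simplified model with an illustrative struct layout, not the kernel's:

struct dma_ops_model {
        void *(*alloc)(unsigned long size);
        void  (*free)(void *p);
};

static void *model_alloc(unsigned long size) { (void)size; return 0; }
static void  model_free(void *p)             { (void)p; }

/* const object: lives in .rodata */
static const struct dma_ops_model model_ops = {
        .alloc = model_alloc,
        .free  = model_free,
};

/* pointer-to-const: callers can invoke the methods but not overwrite them */
static const struct dma_ops_model *dma_ops_ptr = &model_ops;

int main(void)
{
        void *p = dma_ops_ptr->alloc(16);

        dma_ops_ptr->free(p);
        /* dma_ops_ptr->alloc = model_alloc;   would not compile */
        return 0;
}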
16131diff -urNp linux-2.6.32.45/arch/x86/kernel/process_32.c linux-2.6.32.45/arch/x86/kernel/process_32.c
16132--- linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16133+++ linux-2.6.32.45/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16134@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16135 unsigned long thread_saved_pc(struct task_struct *tsk)
16136 {
16137 return ((unsigned long *)tsk->thread.sp)[3];
16138+//XXX return tsk->thread.eip;
16139 }
16140
16141 #ifndef CONFIG_SMP
16142@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16143 unsigned short ss, gs;
16144 const char *board;
16145
16146- if (user_mode_vm(regs)) {
16147+ if (user_mode(regs)) {
16148 sp = regs->sp;
16149 ss = regs->ss & 0xffff;
16150- gs = get_user_gs(regs);
16151 } else {
16152 sp = (unsigned long) (&regs->sp);
16153 savesegment(ss, ss);
16154- savesegment(gs, gs);
16155 }
16156+ gs = get_user_gs(regs);
16157
16158 printk("\n");
16159
16160@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16161 regs.bx = (unsigned long) fn;
16162 regs.dx = (unsigned long) arg;
16163
16164- regs.ds = __USER_DS;
16165- regs.es = __USER_DS;
16166+ regs.ds = __KERNEL_DS;
16167+ regs.es = __KERNEL_DS;
16168 regs.fs = __KERNEL_PERCPU;
16169- regs.gs = __KERNEL_STACK_CANARY;
16170+ savesegment(gs, regs.gs);
16171 regs.orig_ax = -1;
16172 regs.ip = (unsigned long) kernel_thread_helper;
16173 regs.cs = __KERNEL_CS | get_kernel_rpl();
16174@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16175 struct task_struct *tsk;
16176 int err;
16177
16178- childregs = task_pt_regs(p);
16179+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16180 *childregs = *regs;
16181 childregs->ax = 0;
16182 childregs->sp = sp;
16183
16184 p->thread.sp = (unsigned long) childregs;
16185 p->thread.sp0 = (unsigned long) (childregs+1);
16186+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16187
16188 p->thread.ip = (unsigned long) ret_from_fork;
16189
16190@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16191 struct thread_struct *prev = &prev_p->thread,
16192 *next = &next_p->thread;
16193 int cpu = smp_processor_id();
16194- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16195+ struct tss_struct *tss = init_tss + cpu;
16196 bool preload_fpu;
16197
16198 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16199@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16200 */
16201 lazy_save_gs(prev->gs);
16202
16203+#ifdef CONFIG_PAX_MEMORY_UDEREF
16204+ __set_fs(task_thread_info(next_p)->addr_limit);
16205+#endif
16206+
16207 /*
16208 * Load the per-thread Thread-Local Storage descriptor.
16209 */
16210@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16211 */
16212 arch_end_context_switch(next_p);
16213
16214+ percpu_write(current_task, next_p);
16215+ percpu_write(current_tinfo, &next_p->tinfo);
16216+
16217 if (preload_fpu)
16218 __math_state_restore();
16219
16220@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16221 if (prev->gs | next->gs)
16222 lazy_load_gs(next->gs);
16223
16224- percpu_write(current_task, next_p);
16225-
16226 return prev_p;
16227 }
16228
16229@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16230 } while (count++ < 16);
16231 return 0;
16232 }
16233-
16234diff -urNp linux-2.6.32.45/arch/x86/kernel/process_64.c linux-2.6.32.45/arch/x86/kernel/process_64.c
16235--- linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16236+++ linux-2.6.32.45/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16237@@ -91,7 +91,7 @@ static void __exit_idle(void)
16238 void exit_idle(void)
16239 {
16240 /* idle loop has pid 0 */
16241- if (current->pid)
16242+ if (task_pid_nr(current))
16243 return;
16244 __exit_idle();
16245 }
16246@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16247 if (!board)
16248 board = "";
16249 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16250- current->pid, current->comm, print_tainted(),
16251+ task_pid_nr(current), current->comm, print_tainted(),
16252 init_utsname()->release,
16253 (int)strcspn(init_utsname()->version, " "),
16254 init_utsname()->version, board);
16255@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16256 struct pt_regs *childregs;
16257 struct task_struct *me = current;
16258
16259- childregs = ((struct pt_regs *)
16260- (THREAD_SIZE + task_stack_page(p))) - 1;
16261+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16262 *childregs = *regs;
16263
16264 childregs->ax = 0;
16265@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16266 p->thread.sp = (unsigned long) childregs;
16267 p->thread.sp0 = (unsigned long) (childregs+1);
16268 p->thread.usersp = me->thread.usersp;
16269+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16270
16271 set_tsk_thread_flag(p, TIF_FORK);
16272
16273@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16274 struct thread_struct *prev = &prev_p->thread;
16275 struct thread_struct *next = &next_p->thread;
16276 int cpu = smp_processor_id();
16277- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16278+ struct tss_struct *tss = init_tss + cpu;
16279 unsigned fsindex, gsindex;
16280 bool preload_fpu;
16281
16282@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16283 prev->usersp = percpu_read(old_rsp);
16284 percpu_write(old_rsp, next->usersp);
16285 percpu_write(current_task, next_p);
16286+ percpu_write(current_tinfo, &next_p->tinfo);
16287
16288- percpu_write(kernel_stack,
16289- (unsigned long)task_stack_page(next_p) +
16290- THREAD_SIZE - KERNEL_STACK_OFFSET);
16291+ percpu_write(kernel_stack, next->sp0);
16292
16293 /*
16294 * Now maybe reload the debug registers and handle I/O bitmaps
16295@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16296 if (!p || p == current || p->state == TASK_RUNNING)
16297 return 0;
16298 stack = (unsigned long)task_stack_page(p);
16299- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16300+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16301 return 0;
16302 fp = *(u64 *)(p->thread.sp);
16303 do {
16304- if (fp < (unsigned long)stack ||
16305- fp >= (unsigned long)stack+THREAD_SIZE)
16306+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16307 return 0;
16308 ip = *(u64 *)(fp+8);
16309 if (!in_sched_functions(ip))
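The get_wchan() hunk above tightens the bounds on the saved frame pointer before each dereference. A compact model of the new check, assuming 16 KiB kernel stacks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE (16UL * 1024)    /* assumption: 16 KiB kernel stacks */

/* A frame pointer is only followed if both it and the return-address slot
 * at fp + 8 stay clear of the 16 bytes reserved at the top of the stack,
 * mirroring the fp > stack+THREAD_SIZE-16-sizeof(u64) rejection above. */
static bool fp_on_stack(unsigned long fp, unsigned long stack)
{
        return fp >= stack &&
               fp <= stack + THREAD_SIZE - 16 - sizeof(uint64_t);
}

int main(void)
{
        unsigned long stack = 0x100000;

        printf("%d %d\n",
               fp_on_stack(stack + 0x100, stack),             /* accepted */
               fp_on_stack(stack + THREAD_SIZE - 8, stack));  /* rejected */
        return 0;
}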
16310diff -urNp linux-2.6.32.45/arch/x86/kernel/process.c linux-2.6.32.45/arch/x86/kernel/process.c
16311--- linux-2.6.32.45/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16312+++ linux-2.6.32.45/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16313@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16314
16315 void free_thread_info(struct thread_info *ti)
16316 {
16317- free_thread_xstate(ti->task);
16318 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16319 }
16320
16321+static struct kmem_cache *task_struct_cachep;
16322+
16323 void arch_task_cache_init(void)
16324 {
16325- task_xstate_cachep =
16326- kmem_cache_create("task_xstate", xstate_size,
16327+ /* create a slab on which task_structs can be allocated */
16328+ task_struct_cachep =
16329+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16330+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16331+
16332+ task_xstate_cachep =
16333+ kmem_cache_create("task_xstate", xstate_size,
16334 __alignof__(union thread_xstate),
16335- SLAB_PANIC | SLAB_NOTRACK, NULL);
16336+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16337+}
16338+
16339+struct task_struct *alloc_task_struct(void)
16340+{
16341+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16342+}
16343+
16344+void free_task_struct(struct task_struct *task)
16345+{
16346+ free_thread_xstate(task);
16347+ kmem_cache_free(task_struct_cachep, task);
16348 }
16349
16350 /*
16351@@ -73,7 +90,7 @@ void exit_thread(void)
16352 unsigned long *bp = t->io_bitmap_ptr;
16353
16354 if (bp) {
16355- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16356+ struct tss_struct *tss = init_tss + get_cpu();
16357
16358 t->io_bitmap_ptr = NULL;
16359 clear_thread_flag(TIF_IO_BITMAP);
16360@@ -93,6 +110,9 @@ void flush_thread(void)
16361
16362 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16363
16364+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16365+ loadsegment(gs, 0);
16366+#endif
16367 tsk->thread.debugreg0 = 0;
16368 tsk->thread.debugreg1 = 0;
16369 tsk->thread.debugreg2 = 0;
16370@@ -307,7 +327,7 @@ void default_idle(void)
16371 EXPORT_SYMBOL(default_idle);
16372 #endif
16373
16374-void stop_this_cpu(void *dummy)
16375+__noreturn void stop_this_cpu(void *dummy)
16376 {
16377 local_irq_disable();
16378 /*
16379@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16380 }
16381 early_param("idle", idle_setup);
16382
16383-unsigned long arch_align_stack(unsigned long sp)
16384+#ifdef CONFIG_PAX_RANDKSTACK
16385+asmlinkage void pax_randomize_kstack(void)
16386 {
16387- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16388- sp -= get_random_int() % 8192;
16389- return sp & ~0xf;
16390-}
16391+ struct thread_struct *thread = &current->thread;
16392+ unsigned long time;
16393
16394-unsigned long arch_randomize_brk(struct mm_struct *mm)
16395-{
16396- unsigned long range_end = mm->brk + 0x02000000;
16397- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16398+ if (!randomize_va_space)
16399+ return;
16400+
16401+ rdtscl(time);
16402+
16403+ /* P4 seems to return a 0 LSB, ignore it */
16404+#ifdef CONFIG_MPENTIUM4
16405+ time &= 0x3EUL;
16406+ time <<= 2;
16407+#elif defined(CONFIG_X86_64)
16408+ time &= 0xFUL;
16409+ time <<= 4;
16410+#else
16411+ time &= 0x1FUL;
16412+ time <<= 3;
16413+#endif
16414+
16415+ thread->sp0 ^= time;
16416+ load_sp0(init_tss + smp_processor_id(), thread);
16417+
16418+#ifdef CONFIG_X86_64
16419+ percpu_write(kernel_stack, thread->sp0);
16420+#endif
16421 }
16422+#endif
16423
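The PAX_RANDKSTACK block above XORs a few low TSC bits into the task's sp0 on the way back to user space, so each syscall starts at a slightly different kernel stack offset. A userspace model of the masking and shifting, using __rdtsc() from x86intrin.h in place of the kernel's rdtscl():

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>          /* __rdtsc(); x86 + GCC/Clang only */

/* Mirror of the masking above: a handful of low TSC bits, shifted so the
 * result keeps the stack aligned, XORed into the stack-top value.  On
 * x86-64 that is 4 bits of jitter in 16-byte steps; on generic 32-bit x86,
 * 5 bits in 8-byte steps (the P4 case in the patch drops the LSB, which
 * that CPU tends to report as zero). */
static uint64_t randomize_sp0(uint64_t sp0)
{
        uint64_t t = __rdtsc();

#if defined(__x86_64__)
        t &= 0xFULL;  t <<= 4;
#else
        t &= 0x1FULL; t <<= 3;
#endif
        return sp0 ^ t;
}

int main(void)
{
        uint64_t sp0 = 0xffff880000010000ULL;   /* hypothetical stack top */

        for (int i = 0; i < 4; i++)
                printf("sp0 = %#llx\n", (unsigned long long)randomize_sp0(sp0));
        return 0;
}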
16424diff -urNp linux-2.6.32.45/arch/x86/kernel/ptrace.c linux-2.6.32.45/arch/x86/kernel/ptrace.c
16425--- linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16426+++ linux-2.6.32.45/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16427@@ -925,7 +925,7 @@ static const struct user_regset_view use
16428 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16429 {
16430 int ret;
16431- unsigned long __user *datap = (unsigned long __user *)data;
16432+ unsigned long __user *datap = (__force unsigned long __user *)data;
16433
16434 switch (request) {
16435 /* read the word at location addr in the USER area. */
16436@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16437 if (addr < 0)
16438 return -EIO;
16439 ret = do_get_thread_area(child, addr,
16440- (struct user_desc __user *) data);
16441+ (__force struct user_desc __user *) data);
16442 break;
16443
16444 case PTRACE_SET_THREAD_AREA:
16445 if (addr < 0)
16446 return -EIO;
16447 ret = do_set_thread_area(child, addr,
16448- (struct user_desc __user *) data, 0);
16449+ (__force struct user_desc __user *) data, 0);
16450 break;
16451 #endif
16452
16453@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16454 #ifdef CONFIG_X86_PTRACE_BTS
16455 case PTRACE_BTS_CONFIG:
16456 ret = ptrace_bts_config
16457- (child, data, (struct ptrace_bts_config __user *)addr);
16458+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16459 break;
16460
16461 case PTRACE_BTS_STATUS:
16462 ret = ptrace_bts_status
16463- (child, data, (struct ptrace_bts_config __user *)addr);
16464+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16465 break;
16466
16467 case PTRACE_BTS_SIZE:
16468@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16469
16470 case PTRACE_BTS_GET:
16471 ret = ptrace_bts_read_record
16472- (child, data, (struct bts_struct __user *) addr);
16473+ (child, data, (__force struct bts_struct __user *) addr);
16474 break;
16475
16476 case PTRACE_BTS_CLEAR:
16477@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16478
16479 case PTRACE_BTS_DRAIN:
16480 ret = ptrace_bts_drain
16481- (child, data, (struct bts_struct __user *) addr);
16482+ (child, data, (__force struct bts_struct __user *) addr);
16483 break;
16484 #endif /* CONFIG_X86_PTRACE_BTS */
16485
16486@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16487 info.si_code = si_code;
16488
16489 /* User-mode ip? */
16490- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16491+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16492
16493 /* Send us the fake SIGTRAP */
16494 force_sig_info(SIGTRAP, &info, tsk);
16495@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16496 * We must return the syscall number to actually look up in the table.
16497 * This can be -1L to skip running any syscall at all.
16498 */
16499-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16500+long syscall_trace_enter(struct pt_regs *regs)
16501 {
16502 long ret = 0;
16503
16504@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16505 return ret ?: regs->orig_ax;
16506 }
16507
16508-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16509+void syscall_trace_leave(struct pt_regs *regs)
16510 {
16511 if (unlikely(current->audit_context))
16512 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
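The (__force ... __user *) casts added above are sparse annotations rather than compiler attributes: they mark places where a raw syscall argument is deliberately reinterpreted as a user-space pointer. A minimal model of the definitions, mirroring the era's include/linux/compiler.h (both macros expand to nothing outside a sparse run):

#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* ptrace passes its arguments as plain longs; when one is really a pointer
 * into the tracee's user address space, the __force cast tells sparse that
 * crossing address spaces here is intentional rather than a bug. */
static unsigned long demo(long data)
{
        unsigned long __user *datap = (__force unsigned long __user *)data;

        return (unsigned long)datap;
}

int main(void)
{
        printf("%#lx\n", demo(0xbffff000L));
        return 0;
}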
16513diff -urNp linux-2.6.32.45/arch/x86/kernel/reboot.c linux-2.6.32.45/arch/x86/kernel/reboot.c
16514--- linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:35:28.000000000 -0400
16515+++ linux-2.6.32.45/arch/x86/kernel/reboot.c 2011-08-09 18:33:59.000000000 -0400
16516@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16517 EXPORT_SYMBOL(pm_power_off);
16518
16519 static const struct desc_ptr no_idt = {};
16520-static int reboot_mode;
16521+static unsigned short reboot_mode;
16522 enum reboot_type reboot_type = BOOT_KBD;
16523 int reboot_force;
16524
16525@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16526 controller to pulse the CPU reset line, which is more thorough, but
16527 doesn't work with at least one type of 486 motherboard. It is easy
16528 to stop this code working; hence the copious comments. */
16529-static const unsigned long long
16530-real_mode_gdt_entries [3] =
16531+static struct desc_struct
16532+real_mode_gdt_entries [3] __read_only =
16533 {
16534- 0x0000000000000000ULL, /* Null descriptor */
16535- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16536- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16537+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16538+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16539+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16540 };
16541
16542 static const struct desc_ptr
16543@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16544 * specified by the code and length parameters.
16545 * We assume that length will aways be less that 100!
16546 */
16547-void machine_real_restart(const unsigned char *code, int length)
16548+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16549 {
16550 local_irq_disable();
16551
16552@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16553 /* Remap the kernel at virtual address zero, as well as offset zero
16554 from the kernel segment. This assumes the kernel segment starts at
16555 virtual address PAGE_OFFSET. */
16556- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16557- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16558+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16559+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16560
16561 /*
16562 * Use `swapper_pg_dir' as our page directory.
16563@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16564 boot)". This seems like a fairly standard thing that gets set by
16565 REBOOT.COM programs, and the previous reset routine did this
16566 too. */
16567- *((unsigned short *)0x472) = reboot_mode;
16568+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16569
16570 /* For the switch to real mode, copy some code to low memory. It has
16571 to be in the first 64k because it is running in 16-bit mode, and it
16572 has to have the same physical and virtual address, because it turns
16573 off paging. Copy it near the end of the first page, out of the way
16574 of BIOS variables. */
16575- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16576- real_mode_switch, sizeof (real_mode_switch));
16577- memcpy((void *)(0x1000 - 100), code, length);
16578+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16579+ memcpy(__va(0x1000 - 100), code, length);
16580
16581 /* Set up the IDT for real mode. */
16582 load_idt(&real_mode_idt);
16583@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16584 __asm__ __volatile__ ("ljmp $0x0008,%0"
16585 :
16586 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16587+ do { } while (1);
16588 }
16589 #ifdef CONFIG_APM_MODULE
16590 EXPORT_SYMBOL(machine_real_restart);
16591@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_f
16592 {
16593 }
16594
16595-static void native_machine_emergency_restart(void)
16596+__noreturn static void native_machine_emergency_restart(void)
16597 {
16598 int i;
16599
16600@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
16601 #endif
16602 }
16603
16604-static void __machine_emergency_restart(int emergency)
16605+static __noreturn void __machine_emergency_restart(int emergency)
16606 {
16607 reboot_emergency = emergency;
16608 machine_ops.emergency_restart();
16609 }
16610
16611-static void native_machine_restart(char *__unused)
16612+static __noreturn void native_machine_restart(char *__unused)
16613 {
16614 printk("machine restart\n");
16615
16616@@ -674,7 +674,7 @@ static void native_machine_restart(char
16617 __machine_emergency_restart(0);
16618 }
16619
16620-static void native_machine_halt(void)
16621+static __noreturn void native_machine_halt(void)
16622 {
16623 /* stop other cpus and apics */
16624 machine_shutdown();
16625@@ -685,7 +685,7 @@ static void native_machine_halt(void)
16626 stop_this_cpu(NULL);
16627 }
16628
16629-static void native_machine_power_off(void)
16630+__noreturn static void native_machine_power_off(void)
16631 {
16632 if (pm_power_off) {
16633 if (!reboot_force)
16634@@ -694,6 +694,7 @@ static void native_machine_power_off(voi
16635 }
16636 /* a fallback in case there is no PM info available */
16637 tboot_shutdown(TB_SHUTDOWN_HALT);
16638+ do { } while (1);
16639 }
16640
16641 struct machine_ops machine_ops = {
16642diff -urNp linux-2.6.32.45/arch/x86/kernel/setup.c linux-2.6.32.45/arch/x86/kernel/setup.c
16643--- linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16644+++ linux-2.6.32.45/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16645@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16646
16647 if (!boot_params.hdr.root_flags)
16648 root_mountflags &= ~MS_RDONLY;
16649- init_mm.start_code = (unsigned long) _text;
16650- init_mm.end_code = (unsigned long) _etext;
16651+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16652+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16653 init_mm.end_data = (unsigned long) _edata;
16654 init_mm.brk = _brk_end;
16655
16656- code_resource.start = virt_to_phys(_text);
16657- code_resource.end = virt_to_phys(_etext)-1;
16658- data_resource.start = virt_to_phys(_etext);
16659+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16660+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16661+ data_resource.start = virt_to_phys(_sdata);
16662 data_resource.end = virt_to_phys(_edata)-1;
16663 bss_resource.start = virt_to_phys(&__bss_start);
16664 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16665diff -urNp linux-2.6.32.45/arch/x86/kernel/setup_percpu.c linux-2.6.32.45/arch/x86/kernel/setup_percpu.c
16666--- linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16667+++ linux-2.6.32.45/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16668@@ -25,19 +25,17 @@
16669 # define DBG(x...)
16670 #endif
16671
16672-DEFINE_PER_CPU(int, cpu_number);
16673+#ifdef CONFIG_SMP
16674+DEFINE_PER_CPU(unsigned int, cpu_number);
16675 EXPORT_PER_CPU_SYMBOL(cpu_number);
16676+#endif
16677
16678-#ifdef CONFIG_X86_64
16679 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16680-#else
16681-#define BOOT_PERCPU_OFFSET 0
16682-#endif
16683
16684 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16685 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16686
16687-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16688+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16689 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16690 };
16691 EXPORT_SYMBOL(__per_cpu_offset);
16692@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16693 {
16694 #ifdef CONFIG_X86_32
16695 struct desc_struct gdt;
16696+ unsigned long base = per_cpu_offset(cpu);
16697
16698- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16699- 0x2 | DESCTYPE_S, 0x8);
16700- gdt.s = 1;
16701+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16702+ 0x83 | DESCTYPE_S, 0xC);
16703 write_gdt_entry(get_cpu_gdt_table(cpu),
16704 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16705 #endif
16706@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16707 /* alrighty, percpu areas up and running */
16708 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16709 for_each_possible_cpu(cpu) {
16710+#ifdef CONFIG_CC_STACKPROTECTOR
16711+#ifdef CONFIG_X86_32
16712+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16713+#endif
16714+#endif
16715 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16716 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16717 per_cpu(cpu_number, cpu) = cpu;
16718@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16719 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16720 #endif
16721 #endif
16722+#ifdef CONFIG_CC_STACKPROTECTOR
16723+#ifdef CONFIG_X86_32
16724+ if (!cpu)
16725+ per_cpu(stack_canary.canary, cpu) = canary;
16726+#endif
16727+#endif
16728 /*
16729 * Up to this point, the boot CPU has been using .data.init
16730 * area. Reload any changed state for the boot CPU.
16731diff -urNp linux-2.6.32.45/arch/x86/kernel/signal.c linux-2.6.32.45/arch/x86/kernel/signal.c
16732--- linux-2.6.32.45/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16733+++ linux-2.6.32.45/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16734@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16735 * Align the stack pointer according to the i386 ABI,
16736 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16737 */
16738- sp = ((sp + 4) & -16ul) - 4;
16739+ sp = ((sp - 12) & -16ul) - 4;
16740 #else /* !CONFIG_X86_32 */
16741 sp = round_down(sp, 16) - 8;
16742 #endif
16743@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16744 * Return an always-bogus address instead so we will die with SIGSEGV.
16745 */
16746 if (onsigstack && !likely(on_sig_stack(sp)))
16747- return (void __user *)-1L;
16748+ return (__force void __user *)-1L;
16749
16750 /* save i387 state */
16751 if (used_math() && save_i387_xstate(*fpstate) < 0)
16752- return (void __user *)-1L;
16753+ return (__force void __user *)-1L;
16754
16755 return (void __user *)sp;
16756 }
16757@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16758 }
16759
16760 if (current->mm->context.vdso)
16761- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16762+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16763 else
16764- restorer = &frame->retcode;
16765+ restorer = (void __user *)&frame->retcode;
16766 if (ka->sa.sa_flags & SA_RESTORER)
16767 restorer = ka->sa.sa_restorer;
16768
16769@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16770 * reasons and because gdb uses it as a signature to notice
16771 * signal handler stack frames.
16772 */
16773- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16774+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16775
16776 if (err)
16777 return -EFAULT;
16778@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16779 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16780
16781 /* Set up to return from userspace. */
16782- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16783+ if (current->mm->context.vdso)
16784+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16785+ else
16786+ restorer = (void __user *)&frame->retcode;
16787 if (ka->sa.sa_flags & SA_RESTORER)
16788 restorer = ka->sa.sa_restorer;
16789 put_user_ex(restorer, &frame->pretcode);
16790@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16791 * reasons and because gdb uses it as a signature to notice
16792 * signal handler stack frames.
16793 */
16794- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16795+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16796 } put_user_catch(err);
16797
16798 if (err)
16799@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16800 int signr;
16801 sigset_t *oldset;
16802
16803+ pax_track_stack();
16804+
16805 /*
16806 * We want the common case to go fast, which is why we may in certain
16807 * cases get here from kernel mode. Just return without doing anything
16808@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16809 * X86_32: vm86 regs switched out by assembly code before reaching
16810 * here, so testing against kernel CS suffices.
16811 */
16812- if (!user_mode(regs))
16813+ if (!user_mode_novm(regs))
16814 return;
16815
16816 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
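The align_sigframe() change above swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4. Both keep the i386 ABI invariant that ((sp + 4) & 15) == 0 at handler entry; the new form additionally always places the frame at least 16 bytes below the incoming sp instead of possibly reusing it. A short check over every residue of sp mod 16:

#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
                unsigned long old_sp = ((sp + 4) & -16ul) - 4;
                unsigned long new_sp = ((sp - 12) & -16ul) - 4;

                printf("sp=%#lx old=%#lx(+4&15=%lu) new=%#lx(+4&15=%lu) drop=%lu\n",
                       sp, old_sp, (old_sp + 4) & 15,
                       new_sp, (new_sp + 4) & 15, sp - new_sp);
        }
        return 0;
}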
16817diff -urNp linux-2.6.32.45/arch/x86/kernel/smpboot.c linux-2.6.32.45/arch/x86/kernel/smpboot.c
16818--- linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16819+++ linux-2.6.32.45/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16820@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16821 */
16822 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16823
16824-void cpu_hotplug_driver_lock()
16825+void cpu_hotplug_driver_lock(void)
16826 {
16827- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16828+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16829 }
16830
16831-void cpu_hotplug_driver_unlock()
16832+void cpu_hotplug_driver_unlock(void)
16833 {
16834- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16835+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16836 }
16837
16838 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16839@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16840 * target processor state.
16841 */
16842 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16843- (unsigned long)stack_start.sp);
16844+ stack_start);
16845
16846 /*
16847 * Run STARTUP IPI loop.
16848@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16849 set_idle_for_cpu(cpu, c_idle.idle);
16850 do_rest:
16851 per_cpu(current_task, cpu) = c_idle.idle;
16852+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16853 #ifdef CONFIG_X86_32
16854 /* Stack for startup_32 can be just as for start_secondary onwards */
16855 irq_ctx_init(cpu);
16856@@ -750,13 +751,15 @@ do_rest:
16857 #else
16858 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16859 initial_gs = per_cpu_offset(cpu);
16860- per_cpu(kernel_stack, cpu) =
16861- (unsigned long)task_stack_page(c_idle.idle) -
16862- KERNEL_STACK_OFFSET + THREAD_SIZE;
16863+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16864 #endif
16865+
16866+ pax_open_kernel();
16867 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16868+ pax_close_kernel();
16869+
16870 initial_code = (unsigned long)start_secondary;
16871- stack_start.sp = (void *) c_idle.idle->thread.sp;
16872+ stack_start = c_idle.idle->thread.sp;
16873
16874 /* start_ip had better be page-aligned! */
16875 start_ip = setup_trampoline();
16876@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16877
16878 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16879
16880+#ifdef CONFIG_PAX_PER_CPU_PGD
16881+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16882+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16883+ KERNEL_PGD_PTRS);
16884+#endif
16885+
16886 err = do_boot_cpu(apicid, cpu);
16887
16888 if (err) {
16889diff -urNp linux-2.6.32.45/arch/x86/kernel/step.c linux-2.6.32.45/arch/x86/kernel/step.c
16890--- linux-2.6.32.45/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16891+++ linux-2.6.32.45/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16892@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16893 struct desc_struct *desc;
16894 unsigned long base;
16895
16896- seg &= ~7UL;
16897+ seg >>= 3;
16898
16899 mutex_lock(&child->mm->context.lock);
16900- if (unlikely((seg >> 3) >= child->mm->context.size))
16901+ if (unlikely(seg >= child->mm->context.size))
16902 addr = -1L; /* bogus selector, access would fault */
16903 else {
16904 desc = child->mm->context.ldt + seg;
16905@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16906 addr += base;
16907 }
16908 mutex_unlock(&child->mm->context.lock);
16909- }
16910+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16911+ addr = ktla_ktva(addr);
16912
16913 return addr;
16914 }
16915@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16916 unsigned char opcode[15];
16917 unsigned long addr = convert_ip_to_linear(child, regs);
16918
16919+ if (addr == -EINVAL)
16920+ return 0;
16921+
16922 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16923 for (i = 0; i < copied; i++) {
16924 switch (opcode[i]) {
16925@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16926
16927 #ifdef CONFIG_X86_64
16928 case 0x40 ... 0x4f:
16929- if (regs->cs != __USER_CS)
16930+ if ((regs->cs & 0xffff) != __USER_CS)
16931 /* 32-bit mode: register increment */
16932 return 0;
16933 /* 64-bit mode: REX prefix */
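convert_ip_to_linear() above replaces the mask-then-shift on the selector with a single seg >>= 3, which works because of the fixed x86 selector layout: bits 0-1 are the RPL, bit 2 the table indicator, bits 3-15 the descriptor index. Tiny decoder for reference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t sel = 0x3f;     /* hypothetical LDT selector */

        printf("index=%d  TI=%d  RPL=%d\n",
               sel >> 3, (sel >> 2) & 1, sel & 3);
        return 0;
}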
16934diff -urNp linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S
16935--- linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16936+++ linux-2.6.32.45/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16937@@ -1,3 +1,4 @@
16938+.section .rodata,"a",@progbits
16939 ENTRY(sys_call_table)
16940 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16941 .long sys_exit
16942diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c
16943--- linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16944+++ linux-2.6.32.45/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16945@@ -24,6 +24,21 @@
16946
16947 #include <asm/syscalls.h>
16948
16949+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16950+{
16951+ unsigned long pax_task_size = TASK_SIZE;
16952+
16953+#ifdef CONFIG_PAX_SEGMEXEC
16954+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16955+ pax_task_size = SEGMEXEC_TASK_SIZE;
16956+#endif
16957+
16958+ if (len > pax_task_size || addr > pax_task_size - len)
16959+ return -EINVAL;
16960+
16961+ return 0;
16962+}
16963+
16964 /*
16965 * Perform the select(nd, in, out, ex, tv) and mmap() system
16966 * calls. Linux/i386 didn't use to be able to handle more than
16967@@ -58,6 +73,212 @@ out:
16968 return err;
16969 }
16970
16971+unsigned long
16972+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16973+ unsigned long len, unsigned long pgoff, unsigned long flags)
16974+{
16975+ struct mm_struct *mm = current->mm;
16976+ struct vm_area_struct *vma;
16977+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16978+
16979+#ifdef CONFIG_PAX_SEGMEXEC
16980+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16981+ pax_task_size = SEGMEXEC_TASK_SIZE;
16982+#endif
16983+
16984+ pax_task_size -= PAGE_SIZE;
16985+
16986+ if (len > pax_task_size)
16987+ return -ENOMEM;
16988+
16989+ if (flags & MAP_FIXED)
16990+ return addr;
16991+
16992+#ifdef CONFIG_PAX_RANDMMAP
16993+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16994+#endif
16995+
16996+ if (addr) {
16997+ addr = PAGE_ALIGN(addr);
16998+ if (pax_task_size - len >= addr) {
16999+ vma = find_vma(mm, addr);
17000+ if (check_heap_stack_gap(vma, addr, len))
17001+ return addr;
17002+ }
17003+ }
17004+ if (len > mm->cached_hole_size) {
17005+ start_addr = addr = mm->free_area_cache;
17006+ } else {
17007+ start_addr = addr = mm->mmap_base;
17008+ mm->cached_hole_size = 0;
17009+ }
17010+
17011+#ifdef CONFIG_PAX_PAGEEXEC
17012+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17013+ start_addr = 0x00110000UL;
17014+
17015+#ifdef CONFIG_PAX_RANDMMAP
17016+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17017+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17018+#endif
17019+
17020+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17021+ start_addr = addr = mm->mmap_base;
17022+ else
17023+ addr = start_addr;
17024+ }
17025+#endif
17026+
17027+full_search:
17028+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17029+ /* At this point: (!vma || addr < vma->vm_end). */
17030+ if (pax_task_size - len < addr) {
17031+ /*
17032+ * Start a new search - just in case we missed
17033+ * some holes.
17034+ */
17035+ if (start_addr != mm->mmap_base) {
17036+ start_addr = addr = mm->mmap_base;
17037+ mm->cached_hole_size = 0;
17038+ goto full_search;
17039+ }
17040+ return -ENOMEM;
17041+ }
17042+ if (check_heap_stack_gap(vma, addr, len))
17043+ break;
17044+ if (addr + mm->cached_hole_size < vma->vm_start)
17045+ mm->cached_hole_size = vma->vm_start - addr;
17046+ addr = vma->vm_end;
17047+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17048+ start_addr = addr = mm->mmap_base;
17049+ mm->cached_hole_size = 0;
17050+ goto full_search;
17051+ }
17052+ }
17053+
17054+ /*
17055+ * Remember the place where we stopped the search:
17056+ */
17057+ mm->free_area_cache = addr + len;
17058+ return addr;
17059+}
17060+
17061+unsigned long
17062+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17063+ const unsigned long len, const unsigned long pgoff,
17064+ const unsigned long flags)
17065+{
17066+ struct vm_area_struct *vma;
17067+ struct mm_struct *mm = current->mm;
17068+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17069+
17070+#ifdef CONFIG_PAX_SEGMEXEC
17071+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17072+ pax_task_size = SEGMEXEC_TASK_SIZE;
17073+#endif
17074+
17075+ pax_task_size -= PAGE_SIZE;
17076+
17077+ /* requested length too big for entire address space */
17078+ if (len > pax_task_size)
17079+ return -ENOMEM;
17080+
17081+ if (flags & MAP_FIXED)
17082+ return addr;
17083+
17084+#ifdef CONFIG_PAX_PAGEEXEC
17085+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17086+ goto bottomup;
17087+#endif
17088+
17089+#ifdef CONFIG_PAX_RANDMMAP
17090+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17091+#endif
17092+
17093+ /* requesting a specific address */
17094+ if (addr) {
17095+ addr = PAGE_ALIGN(addr);
17096+ if (pax_task_size - len >= addr) {
17097+ vma = find_vma(mm, addr);
17098+ if (check_heap_stack_gap(vma, addr, len))
17099+ return addr;
17100+ }
17101+ }
17102+
17103+ /* check if free_area_cache is useful for us */
17104+ if (len <= mm->cached_hole_size) {
17105+ mm->cached_hole_size = 0;
17106+ mm->free_area_cache = mm->mmap_base;
17107+ }
17108+
17109+ /* either no address requested or can't fit in requested address hole */
17110+ addr = mm->free_area_cache;
17111+
17112+ /* make sure it can fit in the remaining address space */
17113+ if (addr > len) {
17114+ vma = find_vma(mm, addr-len);
17115+ if (check_heap_stack_gap(vma, addr - len, len))
17116+ /* remember the address as a hint for next time */
17117+ return (mm->free_area_cache = addr-len);
17118+ }
17119+
17120+ if (mm->mmap_base < len)
17121+ goto bottomup;
17122+
17123+ addr = mm->mmap_base-len;
17124+
17125+ do {
17126+ /*
17127+ * Lookup failure means no vma is above this address,
17128+ * else if new region fits below vma->vm_start,
17129+ * return with success:
17130+ */
17131+ vma = find_vma(mm, addr);
17132+ if (check_heap_stack_gap(vma, addr, len))
17133+ /* remember the address as a hint for next time */
17134+ return (mm->free_area_cache = addr);
17135+
17136+ /* remember the largest hole we saw so far */
17137+ if (addr + mm->cached_hole_size < vma->vm_start)
17138+ mm->cached_hole_size = vma->vm_start - addr;
17139+
17140+ /* try just below the current vma->vm_start */
17141+ addr = skip_heap_stack_gap(vma, len);
17142+ } while (!IS_ERR_VALUE(addr));
17143+
17144+bottomup:
17145+ /*
17146+ * A failed mmap() very likely causes application failure,
17147+ * so fall back to the bottom-up function here. This scenario
17148+ * can happen with large stack limits and large mmap()
17149+ * allocations.
17150+ */
17151+
17152+#ifdef CONFIG_PAX_SEGMEXEC
17153+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17154+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17155+ else
17156+#endif
17157+
17158+ mm->mmap_base = TASK_UNMAPPED_BASE;
17159+
17160+#ifdef CONFIG_PAX_RANDMMAP
17161+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17162+ mm->mmap_base += mm->delta_mmap;
17163+#endif
17164+
17165+ mm->free_area_cache = mm->mmap_base;
17166+ mm->cached_hole_size = ~0UL;
17167+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17168+ /*
17169+ * Restore the topdown base:
17170+ */
17171+ mm->mmap_base = base;
17172+ mm->free_area_cache = base;
17173+ mm->cached_hole_size = ~0UL;
17174+
17175+ return addr;
17176+}
17177
17178 struct sel_arg_struct {
17179 unsigned long n;
17180@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17181 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17182 case SEMTIMEDOP:
17183 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17184- (const struct timespec __user *)fifth);
17185+ (__force const struct timespec __user *)fifth);
17186
17187 case SEMGET:
17188 return sys_semget(first, second, third);
17189@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17190 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17191 if (ret)
17192 return ret;
17193- return put_user(raddr, (ulong __user *) third);
17194+ return put_user(raddr, (__force ulong __user *) third);
17195 }
17196 case 1: /* iBCS2 emulator entry point */
17197 if (!segment_eq(get_fs(), get_ds()))
17198@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17199
17200 return error;
17201 }
17202-
17203-
17204-/*
17205- * Do a system call from kernel instead of calling sys_execve so we
17206- * end up with proper pt_regs.
17207- */
17208-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17209-{
17210- long __res;
17211- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17212- : "=a" (__res)
17213- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17214- return __res;
17215-}
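The arch_get_unmapped_area()/..._topdown() routines added above replace the stock "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). A simplified model of the shape of such a check; the gap size and the grows-down handling here are illustrative assumptions, not the helper defined elsewhere in this patch:

#include <stdbool.h>

struct vma_model {
        unsigned long vm_start;
        unsigned long vm_end;
        bool          grows_down;      /* stands in for VM_GROWSDOWN */
};

#define GUARD_GAP (256UL * 4096)       /* hypothetical heap-stack gap */

static bool check_gap(const struct vma_model *vma,
                      unsigned long addr, unsigned long len)
{
        if (!vma)                       /* nothing above: always fits      */
                return true;
        if (vma->grows_down)            /* keep room for the stack to grow */
                return addr + len + GUARD_GAP <= vma->vm_start;
        return addr + len <= vma->vm_start;
}

int main(void)
{
        struct vma_model stack = { 0xbf000000UL, 0xc0000000UL, true };

        return !check_gap(&stack, 0xbe000000UL, 4096);
}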
17216diff -urNp linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c
17217--- linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17218+++ linux-2.6.32.45/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17219@@ -32,8 +32,8 @@ out:
17220 return error;
17221 }
17222
17223-static void find_start_end(unsigned long flags, unsigned long *begin,
17224- unsigned long *end)
17225+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17226+ unsigned long *begin, unsigned long *end)
17227 {
17228 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17229 unsigned long new_begin;
17230@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17231 *begin = new_begin;
17232 }
17233 } else {
17234- *begin = TASK_UNMAPPED_BASE;
17235+ *begin = mm->mmap_base;
17236 *end = TASK_SIZE;
17237 }
17238 }
17239@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17240 if (flags & MAP_FIXED)
17241 return addr;
17242
17243- find_start_end(flags, &begin, &end);
17244+ find_start_end(mm, flags, &begin, &end);
17245
17246 if (len > end)
17247 return -ENOMEM;
17248
17249+#ifdef CONFIG_PAX_RANDMMAP
17250+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17251+#endif
17252+
17253 if (addr) {
17254 addr = PAGE_ALIGN(addr);
17255 vma = find_vma(mm, addr);
17256- if (end - len >= addr &&
17257- (!vma || addr + len <= vma->vm_start))
17258+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17259 return addr;
17260 }
17261 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17262@@ -106,7 +109,7 @@ full_search:
17263 }
17264 return -ENOMEM;
17265 }
17266- if (!vma || addr + len <= vma->vm_start) {
17267+ if (check_heap_stack_gap(vma, addr, len)) {
17268 /*
17269 * Remember the place where we stopped the search:
17270 */
17271@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17272 {
17273 struct vm_area_struct *vma;
17274 struct mm_struct *mm = current->mm;
17275- unsigned long addr = addr0;
17276+ unsigned long base = mm->mmap_base, addr = addr0;
17277
17278 /* requested length too big for entire address space */
17279 if (len > TASK_SIZE)
17280@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17281 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17282 goto bottomup;
17283
17284+#ifdef CONFIG_PAX_RANDMMAP
17285+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17286+#endif
17287+
17288 /* requesting a specific address */
17289 if (addr) {
17290 addr = PAGE_ALIGN(addr);
17291- vma = find_vma(mm, addr);
17292- if (TASK_SIZE - len >= addr &&
17293- (!vma || addr + len <= vma->vm_start))
17294- return addr;
17295+ if (TASK_SIZE - len >= addr) {
17296+ vma = find_vma(mm, addr);
17297+ if (check_heap_stack_gap(vma, addr, len))
17298+ return addr;
17299+ }
17300 }
17301
17302 /* check if free_area_cache is useful for us */
17303@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17304 /* make sure it can fit in the remaining address space */
17305 if (addr > len) {
17306 vma = find_vma(mm, addr-len);
17307- if (!vma || addr <= vma->vm_start)
17308+ if (check_heap_stack_gap(vma, addr - len, len))
17309 /* remember the address as a hint for next time */
17310 return mm->free_area_cache = addr-len;
17311 }
17312@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17313 * return with success:
17314 */
17315 vma = find_vma(mm, addr);
17316- if (!vma || addr+len <= vma->vm_start)
17317+ if (check_heap_stack_gap(vma, addr, len))
17318 /* remember the address as a hint for next time */
17319 return mm->free_area_cache = addr;
17320
17321@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17322 mm->cached_hole_size = vma->vm_start - addr;
17323
17324 /* try just below the current vma->vm_start */
17325- addr = vma->vm_start-len;
17326- } while (len < vma->vm_start);
17327+ addr = skip_heap_stack_gap(vma, len);
17328+ } while (!IS_ERR_VALUE(addr));
17329
17330 bottomup:
17331 /*
17332@@ -198,13 +206,21 @@ bottomup:
17333 * can happen with large stack limits and large mmap()
17334 * allocations.
17335 */
17336+ mm->mmap_base = TASK_UNMAPPED_BASE;
17337+
17338+#ifdef CONFIG_PAX_RANDMMAP
17339+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17340+ mm->mmap_base += mm->delta_mmap;
17341+#endif
17342+
17343+ mm->free_area_cache = mm->mmap_base;
17344 mm->cached_hole_size = ~0UL;
17345- mm->free_area_cache = TASK_UNMAPPED_BASE;
17346 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17347 /*
17348 * Restore the topdown base:
17349 */
17350- mm->free_area_cache = mm->mmap_base;
17351+ mm->mmap_base = base;
17352+ mm->free_area_cache = base;
17353 mm->cached_hole_size = ~0UL;
17354
17355 return addr;
17356diff -urNp linux-2.6.32.45/arch/x86/kernel/tboot.c linux-2.6.32.45/arch/x86/kernel/tboot.c
17357--- linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17358+++ linux-2.6.32.45/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17359@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17360
17361 void tboot_shutdown(u32 shutdown_type)
17362 {
17363- void (*shutdown)(void);
17364+ void (* __noreturn shutdown)(void);
17365
17366 if (!tboot_enabled())
17367 return;
17368@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17369
17370 switch_to_tboot_pt();
17371
17372- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17373+ shutdown = (void *)tboot->shutdown_entry;
17374 shutdown();
17375
17376 /* should not reach here */
17377@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17378 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17379 }
17380
17381-static atomic_t ap_wfs_count;
17382+static atomic_unchecked_t ap_wfs_count;
17383
17384 static int tboot_wait_for_aps(int num_aps)
17385 {
17386@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17387 {
17388 switch (action) {
17389 case CPU_DYING:
17390- atomic_inc(&ap_wfs_count);
17391+ atomic_inc_unchecked(&ap_wfs_count);
17392 if (num_online_cpus() == 1)
17393- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17394+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17395 return NOTIFY_BAD;
17396 break;
17397 }
17398@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17399
17400 tboot_create_trampoline();
17401
17402- atomic_set(&ap_wfs_count, 0);
17403+ atomic_set_unchecked(&ap_wfs_count, 0);
17404 register_hotcpu_notifier(&tboot_cpu_notifier);
17405 return 0;
17406 }
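ap_wfs_count is moved to atomic_unchecked_t above because, under the PaX REFCOUNT hardening this patch applies to plain atomic_t, increments trap on signed overflow; counters that are not object reference counts are opted out through the *_unchecked API. A userspace sketch of the two behaviours using __builtin_add_overflow (the kernel does this in the atomic asm helpers, not like this):

#include <stdio.h>
#include <limits.h>

static int checked_inc(int v)
{
        int out;

        if (__builtin_add_overflow(v, 1, &out)) {   /* REFCOUNT-style trap */
                fprintf(stderr, "refcount overflow\n");
                __builtin_trap();
        }
        return out;
}

static int unchecked_inc(int v)
{
        return (int)((unsigned int)v + 1);          /* wraps silently */
}

int main(void)
{
        printf("unchecked: %d -> %d\n", INT_MAX, unchecked_inc(INT_MAX));
        printf("checked:   %d -> %d\n", 1, checked_inc(1));
        return 0;
}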
17407diff -urNp linux-2.6.32.45/arch/x86/kernel/time.c linux-2.6.32.45/arch/x86/kernel/time.c
17408--- linux-2.6.32.45/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17409+++ linux-2.6.32.45/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17410@@ -26,17 +26,13 @@
17411 int timer_ack;
17412 #endif
17413
17414-#ifdef CONFIG_X86_64
17415-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17416-#endif
17417-
17418 unsigned long profile_pc(struct pt_regs *regs)
17419 {
17420 unsigned long pc = instruction_pointer(regs);
17421
17422- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17423+ if (!user_mode(regs) && in_lock_functions(pc)) {
17424 #ifdef CONFIG_FRAME_POINTER
17425- return *(unsigned long *)(regs->bp + sizeof(long));
17426+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17427 #else
17428 unsigned long *sp =
17429 (unsigned long *)kernel_stack_pointer(regs);
17430@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17431 * or above a saved flags. Eflags has bits 22-31 zero,
17432 * kernel addresses don't.
17433 */
17434+
17435+#ifdef CONFIG_PAX_KERNEXEC
17436+ return ktla_ktva(sp[0]);
17437+#else
17438 if (sp[0] >> 22)
17439 return sp[0];
17440 if (sp[1] >> 22)
17441 return sp[1];
17442 #endif
17443+
17444+#endif
17445 }
17446 return pc;
17447 }
17448diff -urNp linux-2.6.32.45/arch/x86/kernel/tls.c linux-2.6.32.45/arch/x86/kernel/tls.c
17449--- linux-2.6.32.45/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17450+++ linux-2.6.32.45/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17451@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17452 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17453 return -EINVAL;
17454
17455+#ifdef CONFIG_PAX_SEGMEXEC
17456+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17457+ return -EINVAL;
17458+#endif
17459+
17460 set_tls_desc(p, idx, &info, 1);
17461
17462 return 0;
17463diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_32.S linux-2.6.32.45/arch/x86/kernel/trampoline_32.S
17464--- linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17465+++ linux-2.6.32.45/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17466@@ -32,6 +32,12 @@
17467 #include <asm/segment.h>
17468 #include <asm/page_types.h>
17469
17470+#ifdef CONFIG_PAX_KERNEXEC
17471+#define ta(X) (X)
17472+#else
17473+#define ta(X) ((X) - __PAGE_OFFSET)
17474+#endif
17475+
17476 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17477 __CPUINITRODATA
17478 .code16
17479@@ -60,7 +66,7 @@ r_base = .
17480 inc %ax # protected mode (PE) bit
17481 lmsw %ax # into protected mode
17482 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17483- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17484+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17485
17486 # These need to be in the same 64K segment as the above;
17487 # hence we don't use the boot_gdt_descr defined in head.S
17488diff -urNp linux-2.6.32.45/arch/x86/kernel/trampoline_64.S linux-2.6.32.45/arch/x86/kernel/trampoline_64.S
17489--- linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17490+++ linux-2.6.32.45/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17491@@ -91,7 +91,7 @@ startup_32:
17492 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17493 movl %eax, %ds
17494
17495- movl $X86_CR4_PAE, %eax
17496+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17497 movl %eax, %cr4 # Enable PAE mode
17498
17499 # Setup trampoline 4 level pagetables
17500@@ -127,7 +127,7 @@ startup_64:
17501 no_longmode:
17502 hlt
17503 jmp no_longmode
17504-#include "verify_cpu_64.S"
17505+#include "verify_cpu.S"
17506
17507 # Careful these need to be in the same 64K segment as the above;
17508 tidt:
17509@@ -138,7 +138,7 @@ tidt:
17510 # so the kernel can live anywhere
17511 .balign 4
17512 tgdt:
17513- .short tgdt_end - tgdt # gdt limit
17514+ .short tgdt_end - tgdt - 1 # gdt limit
17515 .long tgdt - r_base
17516 .short 0
17517 .quad 0x00cf9b000000ffff # __KERNEL32_CS
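
The tgdt change above fixes an off-by-one: the 16-bit limit in a GDTR/IDTR image is the offset of the last valid byte, i.e. size - 1, not the size. The same convention in C, for a hypothetical table gdt_table of N eight-byte descriptors:

	/* hypothetical example: struct desc_ptr is the lgdt/lidt operand layout */
	struct desc_ptr gdt_descr = {
		.size    = N * 8 - 1,			/* limit = last valid byte offset */
		.address = (unsigned long)gdt_table,	/* linear base of the table       */
	};
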
17518diff -urNp linux-2.6.32.45/arch/x86/kernel/traps.c linux-2.6.32.45/arch/x86/kernel/traps.c
17519--- linux-2.6.32.45/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17520+++ linux-2.6.32.45/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17521@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17522
17523 /* Do we ignore FPU interrupts ? */
17524 char ignore_fpu_irq;
17525-
17526-/*
17527- * The IDT has to be page-aligned to simplify the Pentium
17528- * F0 0F bug workaround.
17529- */
17530-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17531 #endif
17532
17533 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17534@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17535 static inline void
17536 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17537 {
17538- if (!user_mode_vm(regs))
17539+ if (!user_mode(regs))
17540 die(str, regs, err);
17541 }
17542 #endif
17543
17544 static void __kprobes
17545-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17546+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17547 long error_code, siginfo_t *info)
17548 {
17549 struct task_struct *tsk = current;
17550
17551 #ifdef CONFIG_X86_32
17552- if (regs->flags & X86_VM_MASK) {
17553+ if (v8086_mode(regs)) {
17554 /*
17555 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17556 * On nmi (interrupt 2), do_trap should not be called.
17557@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17558 }
17559 #endif
17560
17561- if (!user_mode(regs))
17562+ if (!user_mode_novm(regs))
17563 goto kernel_trap;
17564
17565 #ifdef CONFIG_X86_32
17566@@ -158,7 +152,7 @@ trap_signal:
17567 printk_ratelimit()) {
17568 printk(KERN_INFO
17569 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17570- tsk->comm, tsk->pid, str,
17571+ tsk->comm, task_pid_nr(tsk), str,
17572 regs->ip, regs->sp, error_code);
17573 print_vma_addr(" in ", regs->ip);
17574 printk("\n");
17575@@ -175,8 +169,20 @@ kernel_trap:
17576 if (!fixup_exception(regs)) {
17577 tsk->thread.error_code = error_code;
17578 tsk->thread.trap_no = trapnr;
17579+
17580+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17581+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17582+ str = "PAX: suspicious stack segment fault";
17583+#endif
17584+
17585 die(str, regs, error_code);
17586 }
17587+
17588+#ifdef CONFIG_PAX_REFCOUNT
17589+ if (trapnr == 4)
17590+ pax_report_refcount_overflow(regs);
17591+#endif
17592+
17593 return;
17594
17595 #ifdef CONFIG_X86_32
17596@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17597 conditional_sti(regs);
17598
17599 #ifdef CONFIG_X86_32
17600- if (regs->flags & X86_VM_MASK)
17601+ if (v8086_mode(regs))
17602 goto gp_in_vm86;
17603 #endif
17604
17605 tsk = current;
17606- if (!user_mode(regs))
17607+ if (!user_mode_novm(regs))
17608 goto gp_in_kernel;
17609
17610+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17611+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17612+ struct mm_struct *mm = tsk->mm;
17613+ unsigned long limit;
17614+
17615+ down_write(&mm->mmap_sem);
17616+ limit = mm->context.user_cs_limit;
17617+ if (limit < TASK_SIZE) {
17618+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17619+ up_write(&mm->mmap_sem);
17620+ return;
17621+ }
17622+ up_write(&mm->mmap_sem);
17623+ }
17624+#endif
17625+
17626 tsk->thread.error_code = error_code;
17627 tsk->thread.trap_no = 13;
17628
17629@@ -305,6 +327,13 @@ gp_in_kernel:
17630 if (notify_die(DIE_GPF, "general protection fault", regs,
17631 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17632 return;
17633+
17634+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17635+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17636+ die("PAX: suspicious general protection fault", regs, error_code);
17637+ else
17638+#endif
17639+
17640 die("general protection fault", regs, error_code);
17641 }
17642
17643@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17644 dotraplinkage notrace __kprobes void
17645 do_nmi(struct pt_regs *regs, long error_code)
17646 {
17647+
17648+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17649+ if (!user_mode(regs)) {
17650+ unsigned long cs = regs->cs & 0xFFFF;
17651+ unsigned long ip = ktva_ktla(regs->ip);
17652+
17653+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17654+ regs->ip = ip;
17655+ }
17656+#endif
17657+
17658 nmi_enter();
17659
17660 inc_irq_stat(__nmi_count);
17661@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17662 }
17663
17664 #ifdef CONFIG_X86_32
17665- if (regs->flags & X86_VM_MASK)
17666+ if (v8086_mode(regs))
17667 goto debug_vm86;
17668 #endif
17669
17670@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17671 * kernel space (but re-enable TF when returning to user mode).
17672 */
17673 if (condition & DR_STEP) {
17674- if (!user_mode(regs))
17675+ if (!user_mode_novm(regs))
17676 goto clear_TF_reenable;
17677 }
17678
17679@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17680 * Handle strange cache flush from user space exception
17681 * in all other cases. This is undocumented behaviour.
17682 */
17683- if (regs->flags & X86_VM_MASK) {
17684+ if (v8086_mode(regs)) {
17685 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17686 return;
17687 }
17688@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17689 void __math_state_restore(void)
17690 {
17691 struct thread_info *thread = current_thread_info();
17692- struct task_struct *tsk = thread->task;
17693+ struct task_struct *tsk = current;
17694
17695 /*
17696 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17697@@ -825,8 +865,7 @@ void __math_state_restore(void)
17698 */
17699 asmlinkage void math_state_restore(void)
17700 {
17701- struct thread_info *thread = current_thread_info();
17702- struct task_struct *tsk = thread->task;
17703+ struct task_struct *tsk = current;
17704
17705 if (!tsk_used_math(tsk)) {
17706 local_irq_enable();
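
The pax_report_refcount_overflow() hook added above is keyed on trapnr == 4 because vector 4 is the x86 overflow exception (#OF). A hedged sketch of how the PAX_REFCOUNT instrumentation (defined in the asm/atomic hunks of this patch, not shown here) raises it: on signed overflow the operation is undone and an int $4 is executed, which arrives in do_trap() above:

	static inline void atomic_inc(atomic_t *v)
	{
		asm volatile(LOCK_PREFIX "incl %0\n"
			     "jno 0f\n"			/* no overflow: done            */
			     LOCK_PREFIX "decl %0\n"	/* undo the increment           */
			     "int $4\n"			/* raise #OF -> do_trap(4, ...) */
			     "0:\n"
			     : "+m" (v->counter));
	}
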
17707diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S
17708--- linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17709+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17710@@ -1,105 +0,0 @@
17711-/*
17712- *
17713- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17714- * code has been borrowed from boot/setup.S and was introduced by
17715- * Andi Kleen.
17716- *
17717- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17718- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17719- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17720- *
17721- * This source code is licensed under the GNU General Public License,
17722- * Version 2. See the file COPYING for more details.
17723- *
17724- * This is a common code for verification whether CPU supports
17725- * long mode and SSE or not. It is not called directly instead this
17726- * file is included at various places and compiled in that context.
17727- * Following are the current usage.
17728- *
17729- * This file is included by both 16bit and 32bit code.
17730- *
17731- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17732- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17733- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17734- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17735- *
17736- * verify_cpu, returns the status of cpu check in register %eax.
17737- * 0: Success 1: Failure
17738- *
17739- * The caller needs to check for the error code and take the action
17740- * appropriately. Either display a message or halt.
17741- */
17742-
17743-#include <asm/cpufeature.h>
17744-
17745-verify_cpu:
17746- pushfl # Save caller passed flags
17747- pushl $0 # Kill any dangerous flags
17748- popfl
17749-
17750- pushfl # standard way to check for cpuid
17751- popl %eax
17752- movl %eax,%ebx
17753- xorl $0x200000,%eax
17754- pushl %eax
17755- popfl
17756- pushfl
17757- popl %eax
17758- cmpl %eax,%ebx
17759- jz verify_cpu_no_longmode # cpu has no cpuid
17760-
17761- movl $0x0,%eax # See if cpuid 1 is implemented
17762- cpuid
17763- cmpl $0x1,%eax
17764- jb verify_cpu_no_longmode # no cpuid 1
17765-
17766- xor %di,%di
17767- cmpl $0x68747541,%ebx # AuthenticAMD
17768- jnz verify_cpu_noamd
17769- cmpl $0x69746e65,%edx
17770- jnz verify_cpu_noamd
17771- cmpl $0x444d4163,%ecx
17772- jnz verify_cpu_noamd
17773- mov $1,%di # cpu is from AMD
17774-
17775-verify_cpu_noamd:
17776- movl $0x1,%eax # Does the cpu have what it takes
17777- cpuid
17778- andl $REQUIRED_MASK0,%edx
17779- xorl $REQUIRED_MASK0,%edx
17780- jnz verify_cpu_no_longmode
17781-
17782- movl $0x80000000,%eax # See if extended cpuid is implemented
17783- cpuid
17784- cmpl $0x80000001,%eax
17785- jb verify_cpu_no_longmode # no extended cpuid
17786-
17787- movl $0x80000001,%eax # Does the cpu have what it takes
17788- cpuid
17789- andl $REQUIRED_MASK1,%edx
17790- xorl $REQUIRED_MASK1,%edx
17791- jnz verify_cpu_no_longmode
17792-
17793-verify_cpu_sse_test:
17794- movl $1,%eax
17795- cpuid
17796- andl $SSE_MASK,%edx
17797- cmpl $SSE_MASK,%edx
17798- je verify_cpu_sse_ok
17799- test %di,%di
17800- jz verify_cpu_no_longmode # only try to force SSE on AMD
17801- movl $0xc0010015,%ecx # HWCR
17802- rdmsr
17803- btr $15,%eax # enable SSE
17804- wrmsr
17805- xor %di,%di # don't loop
17806- jmp verify_cpu_sse_test # try again
17807-
17808-verify_cpu_no_longmode:
17809- popfl # Restore caller passed flags
17810- movl $1,%eax
17811- ret
17812-verify_cpu_sse_ok:
17813- popfl # Restore caller passed flags
17814- xorl %eax, %eax
17815- ret
17816diff -urNp linux-2.6.32.45/arch/x86/kernel/verify_cpu.S linux-2.6.32.45/arch/x86/kernel/verify_cpu.S
17817--- linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17818+++ linux-2.6.32.45/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17819@@ -0,0 +1,140 @@
17820+/*
17821+ *
17822+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17823+ * code has been borrowed from boot/setup.S and was introduced by
17824+ * Andi Kleen.
17825+ *
17826+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17827+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17828+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17829+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17830+ *
17831+ * This source code is licensed under the GNU General Public License,
17832+ * Version 2. See the file COPYING for more details.
17833+ *
17834+ * This is a common code for verification whether CPU supports
17835+ * long mode and SSE or not. It is not called directly instead this
17836+ * file is included at various places and compiled in that context.
17837+ * This file is expected to run in 32bit code. Currently:
17838+ *
17839+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17840+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17841+ * arch/x86/kernel/head_32.S: processor startup
17842+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17843+ *
17844+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17845+ * 0: Success 1: Failure
17846+ *
17847+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17848+ *
17849+ * The caller needs to check for the error code and take the action
17850+ * appropriately. Either display a message or halt.
17851+ */
17852+
17853+#include <asm/cpufeature.h>
17854+#include <asm/msr-index.h>
17855+
17856+verify_cpu:
17857+ pushfl # Save caller passed flags
17858+ pushl $0 # Kill any dangerous flags
17859+ popfl
17860+
17861+ pushfl # standard way to check for cpuid
17862+ popl %eax
17863+ movl %eax,%ebx
17864+ xorl $0x200000,%eax
17865+ pushl %eax
17866+ popfl
17867+ pushfl
17868+ popl %eax
17869+ cmpl %eax,%ebx
17870+ jz verify_cpu_no_longmode # cpu has no cpuid
17871+
17872+ movl $0x0,%eax # See if cpuid 1 is implemented
17873+ cpuid
17874+ cmpl $0x1,%eax
17875+ jb verify_cpu_no_longmode # no cpuid 1
17876+
17877+ xor %di,%di
17878+ cmpl $0x68747541,%ebx # AuthenticAMD
17879+ jnz verify_cpu_noamd
17880+ cmpl $0x69746e65,%edx
17881+ jnz verify_cpu_noamd
17882+ cmpl $0x444d4163,%ecx
17883+ jnz verify_cpu_noamd
17884+ mov $1,%di # cpu is from AMD
17885+ jmp verify_cpu_check
17886+
17887+verify_cpu_noamd:
17888+ cmpl $0x756e6547,%ebx # GenuineIntel?
17889+ jnz verify_cpu_check
17890+ cmpl $0x49656e69,%edx
17891+ jnz verify_cpu_check
17892+ cmpl $0x6c65746e,%ecx
17893+ jnz verify_cpu_check
17894+
17895+ # only call IA32_MISC_ENABLE when:
17896+ # family > 6 || (family == 6 && model >= 0xd)
17897+ movl $0x1, %eax # check CPU family and model
17898+ cpuid
17899+ movl %eax, %ecx
17900+
17901+ andl $0x0ff00f00, %eax # mask family and extended family
17902+ shrl $8, %eax
17903+ cmpl $6, %eax
17904+ ja verify_cpu_clear_xd # family > 6, ok
17905+ jb verify_cpu_check # family < 6, skip
17906+
17907+ andl $0x000f00f0, %ecx # mask model and extended model
17908+ shrl $4, %ecx
17909+ cmpl $0xd, %ecx
17910+ jb verify_cpu_check # family == 6, model < 0xd, skip
17911+
17912+verify_cpu_clear_xd:
17913+ movl $MSR_IA32_MISC_ENABLE, %ecx
17914+ rdmsr
17915+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17916+ jnc verify_cpu_check # only write MSR if bit was changed
17917+ wrmsr
17918+
17919+verify_cpu_check:
17920+ movl $0x1,%eax # Does the cpu have what it takes
17921+ cpuid
17922+ andl $REQUIRED_MASK0,%edx
17923+ xorl $REQUIRED_MASK0,%edx
17924+ jnz verify_cpu_no_longmode
17925+
17926+ movl $0x80000000,%eax # See if extended cpuid is implemented
17927+ cpuid
17928+ cmpl $0x80000001,%eax
17929+ jb verify_cpu_no_longmode # no extended cpuid
17930+
17931+ movl $0x80000001,%eax # Does the cpu have what it takes
17932+ cpuid
17933+ andl $REQUIRED_MASK1,%edx
17934+ xorl $REQUIRED_MASK1,%edx
17935+ jnz verify_cpu_no_longmode
17936+
17937+verify_cpu_sse_test:
17938+ movl $1,%eax
17939+ cpuid
17940+ andl $SSE_MASK,%edx
17941+ cmpl $SSE_MASK,%edx
17942+ je verify_cpu_sse_ok
17943+ test %di,%di
17944+ jz verify_cpu_no_longmode # only try to force SSE on AMD
17945+ movl $MSR_K7_HWCR,%ecx
17946+ rdmsr
17947+ btr $15,%eax # enable SSE
17948+ wrmsr
17949+ xor %di,%di # don't loop
17950+ jmp verify_cpu_sse_test # try again
17951+
17952+verify_cpu_no_longmode:
17953+ popfl # Restore caller passed flags
17954+ movl $1,%eax
17955+ ret
17956+verify_cpu_sse_ok:
17957+ popfl # Restore caller passed flags
17958+ xorl %eax, %eax
17959+ ret
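
The GenuineIntel branch of the new verify_cpu.S only accesses IA32_MISC_ENABLE for family > 6, or family 6 with model >= 0xd. The same family/model decode of CPUID leaf 1, written out in C for clarity; clear_xd_disable() is a placeholder for the rdmsr / btrl $2,%edx / wrmsr sequence above, which clears bit 34 (XD_DISABLE) of MSR_IA32_MISC_ENABLE:

	unsigned int eax   = cpuid_eax(1);
	unsigned int fam   = (eax & 0x0ff00f00) >> 8;	/* family in bits 3-0, extended family in 19-12 */
	unsigned int model = (eax & 0x000f00f0) >> 4;	/* model in bits 3-0, extended model in 15-12    */

	if (fam > 6 || (fam == 6 && model >= 0xd))
		clear_xd_disable();			/* placeholder, see the MSR sequence above */
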
17960diff -urNp linux-2.6.32.45/arch/x86/kernel/vm86_32.c linux-2.6.32.45/arch/x86/kernel/vm86_32.c
17961--- linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17962+++ linux-2.6.32.45/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17963@@ -41,6 +41,7 @@
17964 #include <linux/ptrace.h>
17965 #include <linux/audit.h>
17966 #include <linux/stddef.h>
17967+#include <linux/grsecurity.h>
17968
17969 #include <asm/uaccess.h>
17970 #include <asm/io.h>
17971@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17972 do_exit(SIGSEGV);
17973 }
17974
17975- tss = &per_cpu(init_tss, get_cpu());
17976+ tss = init_tss + get_cpu();
17977 current->thread.sp0 = current->thread.saved_sp0;
17978 current->thread.sysenter_cs = __KERNEL_CS;
17979 load_sp0(tss, &current->thread);
17980@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17981 struct task_struct *tsk;
17982 int tmp, ret = -EPERM;
17983
17984+#ifdef CONFIG_GRKERNSEC_VM86
17985+ if (!capable(CAP_SYS_RAWIO)) {
17986+ gr_handle_vm86();
17987+ goto out;
17988+ }
17989+#endif
17990+
17991 tsk = current;
17992 if (tsk->thread.saved_sp0)
17993 goto out;
17994@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17995 int tmp, ret;
17996 struct vm86plus_struct __user *v86;
17997
17998+#ifdef CONFIG_GRKERNSEC_VM86
17999+ if (!capable(CAP_SYS_RAWIO)) {
18000+ gr_handle_vm86();
18001+ ret = -EPERM;
18002+ goto out;
18003+ }
18004+#endif
18005+
18006 tsk = current;
18007 switch (regs->bx) {
18008 case VM86_REQUEST_IRQ:
18009@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
18010 tsk->thread.saved_fs = info->regs32->fs;
18011 tsk->thread.saved_gs = get_user_gs(info->regs32);
18012
18013- tss = &per_cpu(init_tss, get_cpu());
18014+ tss = init_tss + get_cpu();
18015 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
18016 if (cpu_has_sep)
18017 tsk->thread.sysenter_cs = 0;
18018@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
18019 goto cannot_handle;
18020 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
18021 goto cannot_handle;
18022- intr_ptr = (unsigned long __user *) (i << 2);
18023+ intr_ptr = (__force unsigned long __user *) (i << 2);
18024 if (get_user(segoffs, intr_ptr))
18025 goto cannot_handle;
18026 if ((segoffs >> 16) == BIOSSEG)
18027diff -urNp linux-2.6.32.45/arch/x86/kernel/vmi_32.c linux-2.6.32.45/arch/x86/kernel/vmi_32.c
18028--- linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
18029+++ linux-2.6.32.45/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
18030@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
18031 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
18032
18033 #define call_vrom_func(rom,func) \
18034- (((VROMFUNC *)(rom->func))())
18035+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
18036
18037 #define call_vrom_long_func(rom,func,arg) \
18038- (((VROMLONGFUNC *)(rom->func)) (arg))
18039+({\
18040+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
18041+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
18042+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
18043+ __reloc;\
18044+})
18045
18046-static struct vrom_header *vmi_rom;
18047+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
18048 static int disable_pge;
18049 static int disable_pse;
18050 static int disable_sep;
18051@@ -76,10 +81,10 @@ static struct {
18052 void (*set_initial_ap_state)(int, int);
18053 void (*halt)(void);
18054 void (*set_lazy_mode)(int mode);
18055-} vmi_ops;
18056+} __no_const vmi_ops __read_only;
18057
18058 /* Cached VMI operations */
18059-struct vmi_timer_ops vmi_timer_ops;
18060+struct vmi_timer_ops vmi_timer_ops __read_only;
18061
18062 /*
18063 * VMI patching routines.
18064@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
18065 static inline void patch_offset(void *insnbuf,
18066 unsigned long ip, unsigned long dest)
18067 {
18068- *(unsigned long *)(insnbuf+1) = dest-ip-5;
18069+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
18070 }
18071
18072 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
18073@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
18074 {
18075 u64 reloc;
18076 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
18077+
18078 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
18079 switch(rel->type) {
18080 case VMI_RELOCATION_CALL_REL:
18081@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
18082
18083 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
18084 {
18085- const pte_t pte = { .pte = 0 };
18086+ const pte_t pte = __pte(0ULL);
18087 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
18088 }
18089
18090 static void vmi_pmd_clear(pmd_t *pmd)
18091 {
18092- const pte_t pte = { .pte = 0 };
18093+ const pte_t pte = __pte(0ULL);
18094 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
18095 }
18096 #endif
18097@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
18098 ap.ss = __KERNEL_DS;
18099 ap.esp = (unsigned long) start_esp;
18100
18101- ap.ds = __USER_DS;
18102- ap.es = __USER_DS;
18103+ ap.ds = __KERNEL_DS;
18104+ ap.es = __KERNEL_DS;
18105 ap.fs = __KERNEL_PERCPU;
18106- ap.gs = __KERNEL_STACK_CANARY;
18107+ savesegment(gs, ap.gs);
18108
18109 ap.eflags = 0;
18110
18111@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
18112 paravirt_leave_lazy_mmu();
18113 }
18114
18115+#ifdef CONFIG_PAX_KERNEXEC
18116+static unsigned long vmi_pax_open_kernel(void)
18117+{
18118+ return 0;
18119+}
18120+
18121+static unsigned long vmi_pax_close_kernel(void)
18122+{
18123+ return 0;
18124+}
18125+#endif
18126+
18127 static inline int __init check_vmi_rom(struct vrom_header *rom)
18128 {
18129 struct pci_header *pci;
18130@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18131 return 0;
18132 if (rom->vrom_signature != VMI_SIGNATURE)
18133 return 0;
18134+ if (rom->rom_length * 512 > sizeof(*rom)) {
18135+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18136+ return 0;
18137+ }
18138 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18139 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18140 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18141@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18142 struct vrom_header *romstart;
18143 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18144 if (check_vmi_rom(romstart)) {
18145- vmi_rom = romstart;
18146+ vmi_rom = *romstart;
18147 return 1;
18148 }
18149 }
18150@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18151
18152 para_fill(pv_irq_ops.safe_halt, Halt);
18153
18154+#ifdef CONFIG_PAX_KERNEXEC
18155+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18156+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18157+#endif
18158+
18159 /*
18160 * Alternative instruction rewriting doesn't happen soon enough
18161 * to convert VMI_IRET to a call instead of a jump; so we have
18162@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18163
18164 void __init vmi_init(void)
18165 {
18166- if (!vmi_rom)
18167+ if (!vmi_rom.rom_signature)
18168 probe_vmi_rom();
18169 else
18170- check_vmi_rom(vmi_rom);
18171+ check_vmi_rom(&vmi_rom);
18172
18173 /* In case probing for or validating the ROM failed, basil */
18174- if (!vmi_rom)
18175+ if (!vmi_rom.rom_signature)
18176 return;
18177
18178- reserve_top_address(-vmi_rom->virtual_top);
18179+ reserve_top_address(-vmi_rom.virtual_top);
18180
18181 #ifdef CONFIG_X86_IO_APIC
18182 /* This is virtual hardware; timer routing is wired correctly */
18183@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18184 {
18185 unsigned long flags;
18186
18187- if (!vmi_rom)
18188+ if (!vmi_rom.rom_signature)
18189 return;
18190
18191 local_irq_save(flags);
18192diff -urNp linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S
18193--- linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18194+++ linux-2.6.32.45/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18195@@ -26,6 +26,13 @@
18196 #include <asm/page_types.h>
18197 #include <asm/cache.h>
18198 #include <asm/boot.h>
18199+#include <asm/segment.h>
18200+
18201+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18202+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18203+#else
18204+#define __KERNEL_TEXT_OFFSET 0
18205+#endif
18206
18207 #undef i386 /* in case the preprocessor is a 32bit one */
18208
18209@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18210 #ifdef CONFIG_X86_32
18211 OUTPUT_ARCH(i386)
18212 ENTRY(phys_startup_32)
18213-jiffies = jiffies_64;
18214 #else
18215 OUTPUT_ARCH(i386:x86-64)
18216 ENTRY(phys_startup_64)
18217-jiffies_64 = jiffies;
18218 #endif
18219
18220 PHDRS {
18221 text PT_LOAD FLAGS(5); /* R_E */
18222- data PT_LOAD FLAGS(7); /* RWE */
18223+#ifdef CONFIG_X86_32
18224+ module PT_LOAD FLAGS(5); /* R_E */
18225+#endif
18226+#ifdef CONFIG_XEN
18227+ rodata PT_LOAD FLAGS(5); /* R_E */
18228+#else
18229+ rodata PT_LOAD FLAGS(4); /* R__ */
18230+#endif
18231+ data PT_LOAD FLAGS(6); /* RW_ */
18232 #ifdef CONFIG_X86_64
18233 user PT_LOAD FLAGS(5); /* R_E */
18234+#endif
18235+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18236 #ifdef CONFIG_SMP
18237 percpu PT_LOAD FLAGS(6); /* RW_ */
18238 #endif
18239+ text.init PT_LOAD FLAGS(5); /* R_E */
18240+ text.exit PT_LOAD FLAGS(5); /* R_E */
18241 init PT_LOAD FLAGS(7); /* RWE */
18242-#endif
18243 note PT_NOTE FLAGS(0); /* ___ */
18244 }
18245
18246 SECTIONS
18247 {
18248 #ifdef CONFIG_X86_32
18249- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18250- phys_startup_32 = startup_32 - LOAD_OFFSET;
18251+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18252 #else
18253- . = __START_KERNEL;
18254- phys_startup_64 = startup_64 - LOAD_OFFSET;
18255+ . = __START_KERNEL;
18256 #endif
18257
18258 /* Text and read-only data */
18259- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18260- _text = .;
18261+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18262 /* bootstrapping code */
18263+#ifdef CONFIG_X86_32
18264+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18265+#else
18266+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18267+#endif
18268+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18269+ _text = .;
18270 HEAD_TEXT
18271 #ifdef CONFIG_X86_32
18272 . = ALIGN(PAGE_SIZE);
18273@@ -82,28 +102,71 @@ SECTIONS
18274 IRQENTRY_TEXT
18275 *(.fixup)
18276 *(.gnu.warning)
18277- /* End of text section */
18278- _etext = .;
18279 } :text = 0x9090
18280
18281- NOTES :text :note
18282+ . += __KERNEL_TEXT_OFFSET;
18283+
18284+#ifdef CONFIG_X86_32
18285+ . = ALIGN(PAGE_SIZE);
18286+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18287+ *(.vmi.rom)
18288+ } :module
18289+
18290+ . = ALIGN(PAGE_SIZE);
18291+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18292+
18293+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18294+ MODULES_EXEC_VADDR = .;
18295+ BYTE(0)
18296+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18297+ . = ALIGN(HPAGE_SIZE);
18298+ MODULES_EXEC_END = . - 1;
18299+#endif
18300+
18301+ } :module
18302+#endif
18303
18304- EXCEPTION_TABLE(16) :text = 0x9090
18305+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18306+ /* End of text section */
18307+ _etext = . - __KERNEL_TEXT_OFFSET;
18308+ }
18309+
18310+#ifdef CONFIG_X86_32
18311+ . = ALIGN(PAGE_SIZE);
18312+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18313+ *(.idt)
18314+ . = ALIGN(PAGE_SIZE);
18315+ *(.empty_zero_page)
18316+ *(.swapper_pg_fixmap)
18317+ *(.swapper_pg_pmd)
18318+ *(.swapper_pg_dir)
18319+ *(.trampoline_pg_dir)
18320+ } :rodata
18321+#endif
18322+
18323+ . = ALIGN(PAGE_SIZE);
18324+ NOTES :rodata :note
18325+
18326+ EXCEPTION_TABLE(16) :rodata
18327
18328 RO_DATA(PAGE_SIZE)
18329
18330 /* Data */
18331 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18332+
18333+#ifdef CONFIG_PAX_KERNEXEC
18334+ . = ALIGN(HPAGE_SIZE);
18335+#else
18336+ . = ALIGN(PAGE_SIZE);
18337+#endif
18338+
18339 /* Start of data section */
18340 _sdata = .;
18341
18342 /* init_task */
18343 INIT_TASK_DATA(THREAD_SIZE)
18344
18345-#ifdef CONFIG_X86_32
18346- /* 32 bit has nosave before _edata */
18347 NOSAVE_DATA
18348-#endif
18349
18350 PAGE_ALIGNED_DATA(PAGE_SIZE)
18351
18352@@ -112,6 +175,8 @@ SECTIONS
18353 DATA_DATA
18354 CONSTRUCTORS
18355
18356+ jiffies = jiffies_64;
18357+
18358 /* rarely changed data like cpu maps */
18359 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18360
18361@@ -166,12 +231,6 @@ SECTIONS
18362 }
18363 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18364
18365- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18366- .jiffies : AT(VLOAD(.jiffies)) {
18367- *(.jiffies)
18368- }
18369- jiffies = VVIRT(.jiffies);
18370-
18371 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18372 *(.vsyscall_3)
18373 }
18374@@ -187,12 +246,19 @@ SECTIONS
18375 #endif /* CONFIG_X86_64 */
18376
18377 /* Init code and data - will be freed after init */
18378- . = ALIGN(PAGE_SIZE);
18379 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18380+ BYTE(0)
18381+
18382+#ifdef CONFIG_PAX_KERNEXEC
18383+ . = ALIGN(HPAGE_SIZE);
18384+#else
18385+ . = ALIGN(PAGE_SIZE);
18386+#endif
18387+
18388 __init_begin = .; /* paired with __init_end */
18389- }
18390+ } :init.begin
18391
18392-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18393+#ifdef CONFIG_SMP
18394 /*
18395 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18396 * output PHDR, so the next output section - .init.text - should
18397@@ -201,12 +267,27 @@ SECTIONS
18398 PERCPU_VADDR(0, :percpu)
18399 #endif
18400
18401- INIT_TEXT_SECTION(PAGE_SIZE)
18402-#ifdef CONFIG_X86_64
18403- :init
18404-#endif
18405+ . = ALIGN(PAGE_SIZE);
18406+ init_begin = .;
18407+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18408+ VMLINUX_SYMBOL(_sinittext) = .;
18409+ INIT_TEXT
18410+ VMLINUX_SYMBOL(_einittext) = .;
18411+ . = ALIGN(PAGE_SIZE);
18412+ } :text.init
18413
18414- INIT_DATA_SECTION(16)
18415+ /*
18416+ * .exit.text is discard at runtime, not link time, to deal with
18417+ * references from .altinstructions and .eh_frame
18418+ */
18419+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18420+ EXIT_TEXT
18421+ . = ALIGN(16);
18422+ } :text.exit
18423+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18424+
18425+ . = ALIGN(PAGE_SIZE);
18426+ INIT_DATA_SECTION(16) :init
18427
18428 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18429 __x86_cpu_dev_start = .;
18430@@ -232,19 +313,11 @@ SECTIONS
18431 *(.altinstr_replacement)
18432 }
18433
18434- /*
18435- * .exit.text is discard at runtime, not link time, to deal with
18436- * references from .altinstructions and .eh_frame
18437- */
18438- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18439- EXIT_TEXT
18440- }
18441-
18442 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18443 EXIT_DATA
18444 }
18445
18446-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18447+#ifndef CONFIG_SMP
18448 PERCPU(PAGE_SIZE)
18449 #endif
18450
18451@@ -267,12 +340,6 @@ SECTIONS
18452 . = ALIGN(PAGE_SIZE);
18453 }
18454
18455-#ifdef CONFIG_X86_64
18456- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18457- NOSAVE_DATA
18458- }
18459-#endif
18460-
18461 /* BSS */
18462 . = ALIGN(PAGE_SIZE);
18463 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18464@@ -288,6 +355,7 @@ SECTIONS
18465 __brk_base = .;
18466 . += 64 * 1024; /* 64k alignment slop space */
18467 *(.brk_reservation) /* areas brk users have reserved */
18468+ . = ALIGN(HPAGE_SIZE);
18469 __brk_limit = .;
18470 }
18471
18472@@ -316,13 +384,12 @@ SECTIONS
18473 * for the boot processor.
18474 */
18475 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18476-INIT_PER_CPU(gdt_page);
18477 INIT_PER_CPU(irq_stack_union);
18478
18479 /*
18480 * Build-time check on the image size:
18481 */
18482-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18483+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18484 "kernel image bigger than KERNEL_IMAGE_SIZE");
18485
18486 #ifdef CONFIG_SMP
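
The PHDRS changes above split the old RWE "data" segment into separate executable and writable segments; the FLAGS() arguments are the standard ELF p_flags bits, so the numbers decode as follows:

	/* ELF program header permission bits behind FLAGS(n) in the PHDRS block */
	#define PF_X 0x1	/* execute */
	#define PF_W 0x2	/* write   */
	#define PF_R 0x4	/* read    */
	/* FLAGS(4) = R__, FLAGS(5) = R_E, FLAGS(6) = RW_, FLAGS(7) = RWE;
	 * after the patch no load segment is both writable and executable,
	 * apart from the init segment, which is freed after boot */
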
18487diff -urNp linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c
18488--- linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18489+++ linux-2.6.32.45/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18490@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18491
18492 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18493 /* copy vsyscall data */
18494+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18495 vsyscall_gtod_data.clock.vread = clock->vread;
18496 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18497 vsyscall_gtod_data.clock.mask = clock->mask;
18498@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18499 We do this here because otherwise user space would do it on
18500 its own in a likely inferior way (no access to jiffies).
18501 If you don't like it pass NULL. */
18502- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18503+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18504 p = tcache->blob[1];
18505 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18506 /* Load per CPU data from RDTSCP */
18507diff -urNp linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c
18508--- linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18509+++ linux-2.6.32.45/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18510@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18511
18512 EXPORT_SYMBOL(copy_user_generic);
18513 EXPORT_SYMBOL(__copy_user_nocache);
18514-EXPORT_SYMBOL(copy_from_user);
18515-EXPORT_SYMBOL(copy_to_user);
18516 EXPORT_SYMBOL(__copy_from_user_inatomic);
18517
18518 EXPORT_SYMBOL(copy_page);
18519diff -urNp linux-2.6.32.45/arch/x86/kernel/xsave.c linux-2.6.32.45/arch/x86/kernel/xsave.c
18520--- linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18521+++ linux-2.6.32.45/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18522@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18523 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18524 return -1;
18525
18526- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18527+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18528 fx_sw_user->extended_size -
18529 FP_XSTATE_MAGIC2_SIZE));
18530 /*
18531@@ -196,7 +196,7 @@ fx_only:
18532 * the other extended state.
18533 */
18534 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18535- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18536+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18537 }
18538
18539 /*
18540@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18541 if (task_thread_info(tsk)->status & TS_XSAVE)
18542 err = restore_user_xstate(buf);
18543 else
18544- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18545+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18546 buf);
18547 if (unlikely(err)) {
18548 /*
18549diff -urNp linux-2.6.32.45/arch/x86/kvm/emulate.c linux-2.6.32.45/arch/x86/kvm/emulate.c
18550--- linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18551+++ linux-2.6.32.45/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18552@@ -81,8 +81,8 @@
18553 #define Src2CL (1<<29)
18554 #define Src2ImmByte (2<<29)
18555 #define Src2One (3<<29)
18556-#define Src2Imm16 (4<<29)
18557-#define Src2Mask (7<<29)
18558+#define Src2Imm16 (4U<<29)
18559+#define Src2Mask (7U<<29)
18560
18561 enum {
18562 Group1_80, Group1_81, Group1_82, Group1_83,
18563@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18564
18565 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18566 do { \
18567+ unsigned long _tmp; \
18568 __asm__ __volatile__ ( \
18569 _PRE_EFLAGS("0", "4", "2") \
18570 _op _suffix " %"_x"3,%1; " \
18571@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18572 /* Raw emulation: instruction has two explicit operands. */
18573 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18574 do { \
18575- unsigned long _tmp; \
18576- \
18577 switch ((_dst).bytes) { \
18578 case 2: \
18579 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18580@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18581
18582 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18583 do { \
18584- unsigned long _tmp; \
18585 switch ((_dst).bytes) { \
18586 case 1: \
18587 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
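
The Src2Imm16/Src2Mask change above avoids shifting into the sign bit of a 32-bit int: 3<<29 still fits in a signed int, but 4<<29 is 0x80000000 and overflows it, so the constants that reach bit 31 are made unsigned. Illustrative constants only:

	#define STILL_OK	(3 << 29)	/* 0x60000000, representable as a signed int      */
	#define OVERFLOWS	(4 << 29)	/* 0x80000000: overflows a signed int (undefined) */
	#define WELL_DEFINED	(4U << 29)	/* unsigned shift, well defined                   */
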
18588diff -urNp linux-2.6.32.45/arch/x86/kvm/lapic.c linux-2.6.32.45/arch/x86/kvm/lapic.c
18589--- linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18590+++ linux-2.6.32.45/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18591@@ -52,7 +52,7 @@
18592 #define APIC_BUS_CYCLE_NS 1
18593
18594 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18595-#define apic_debug(fmt, arg...)
18596+#define apic_debug(fmt, arg...) do {} while (0)
18597
18598 #define APIC_LVT_NUM 6
18599 /* 14 is the version for Xeon and Pentium 8.4.8*/
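
Defining the disabled apic_debug() as do {} while (0) instead of expanding to nothing keeps it a real (empty) statement: a call still needs its trailing semicolon and behaves like a void function call wherever a statement is expected, rather than leaving a bare ";" as the body of an unbraced if, which some compiler warnings object to. Illustrative use, with a hypothetical condition:

	if (some_condition)
		apic_debug("vector %d\n", vector);	/* expands to "do {} while (0);", a normal statement */
	else
		return;
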
18600diff -urNp linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h
18601--- linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18602+++ linux-2.6.32.45/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18603@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18604 int level = PT_PAGE_TABLE_LEVEL;
18605 unsigned long mmu_seq;
18606
18607+ pax_track_stack();
18608+
18609 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18610 kvm_mmu_audit(vcpu, "pre page fault");
18611
18612diff -urNp linux-2.6.32.45/arch/x86/kvm/svm.c linux-2.6.32.45/arch/x86/kvm/svm.c
18613--- linux-2.6.32.45/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18614+++ linux-2.6.32.45/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18615@@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18616 int cpu = raw_smp_processor_id();
18617
18618 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18619+
18620+ pax_open_kernel();
18621 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18622+ pax_close_kernel();
18623+
18624 load_TR_desc();
18625 }
18626
18627@@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18628 return true;
18629 }
18630
18631-static struct kvm_x86_ops svm_x86_ops = {
18632+static const struct kvm_x86_ops svm_x86_ops = {
18633 .cpu_has_kvm_support = has_svm,
18634 .disabled_by_bios = is_disabled,
18635 .hardware_setup = svm_hardware_setup,
18636diff -urNp linux-2.6.32.45/arch/x86/kvm/vmx.c linux-2.6.32.45/arch/x86/kvm/vmx.c
18637--- linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18638+++ linux-2.6.32.45/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18639@@ -570,7 +570,11 @@ static void reload_tss(void)
18640
18641 kvm_get_gdt(&gdt);
18642 descs = (void *)gdt.base;
18643+
18644+ pax_open_kernel();
18645 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18646+ pax_close_kernel();
18647+
18648 load_TR_desc();
18649 }
18650
18651@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18652 if (!cpu_has_vmx_flexpriority())
18653 flexpriority_enabled = 0;
18654
18655- if (!cpu_has_vmx_tpr_shadow())
18656- kvm_x86_ops->update_cr8_intercept = NULL;
18657+ if (!cpu_has_vmx_tpr_shadow()) {
18658+ pax_open_kernel();
18659+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18660+ pax_close_kernel();
18661+ }
18662
18663 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18664 kvm_disable_largepages();
18665@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18666 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18667
18668 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18669- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18670+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18671 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18672 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18673 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18674@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18675 "jmp .Lkvm_vmx_return \n\t"
18676 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18677 ".Lkvm_vmx_return: "
18678+
18679+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18680+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18681+ ".Lkvm_vmx_return2: "
18682+#endif
18683+
18684 /* Save guest registers, load host registers, keep flags */
18685 "xchg %0, (%%"R"sp) \n\t"
18686 "mov %%"R"ax, %c[rax](%0) \n\t"
18687@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18688 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18689 #endif
18690 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18691+
18692+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18693+ ,[cs]"i"(__KERNEL_CS)
18694+#endif
18695+
18696 : "cc", "memory"
18697- , R"bx", R"di", R"si"
18698+ , R"ax", R"bx", R"di", R"si"
18699 #ifdef CONFIG_X86_64
18700 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18701 #endif
18702@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18703 if (vmx->rmode.irq.pending)
18704 fixup_rmode_irq(vmx);
18705
18706- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18707+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18708+
18709+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18710+ loadsegment(fs, __KERNEL_PERCPU);
18711+#endif
18712+
18713+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18714+ __set_fs(current_thread_info()->addr_limit);
18715+#endif
18716+
18717 vmx->launched = 1;
18718
18719 vmx_complete_interrupts(vmx);
18720@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18721 return false;
18722 }
18723
18724-static struct kvm_x86_ops vmx_x86_ops = {
18725+static const struct kvm_x86_ops vmx_x86_ops = {
18726 .cpu_has_kvm_support = cpu_has_kvm_support,
18727 .disabled_by_bios = vmx_disabled_by_bios,
18728 .hardware_setup = hardware_setup,
18729diff -urNp linux-2.6.32.45/arch/x86/kvm/x86.c linux-2.6.32.45/arch/x86/kvm/x86.c
18730--- linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18731+++ linux-2.6.32.45/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18732@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18733 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18734 struct kvm_cpuid_entry2 __user *entries);
18735
18736-struct kvm_x86_ops *kvm_x86_ops;
18737+const struct kvm_x86_ops *kvm_x86_ops;
18738 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18739
18740 int ignore_msrs = 0;
18741@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18742 struct kvm_cpuid2 *cpuid,
18743 struct kvm_cpuid_entry2 __user *entries)
18744 {
18745- int r;
18746+ int r, i;
18747
18748 r = -E2BIG;
18749 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18750 goto out;
18751 r = -EFAULT;
18752- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18753- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18754+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18755 goto out;
18756+ for (i = 0; i < cpuid->nent; ++i) {
18757+ struct kvm_cpuid_entry2 cpuid_entry;
18758+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18759+ goto out;
18760+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18761+ }
18762 vcpu->arch.cpuid_nent = cpuid->nent;
18763 kvm_apic_set_version(vcpu);
18764 return 0;
18765@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18766 struct kvm_cpuid2 *cpuid,
18767 struct kvm_cpuid_entry2 __user *entries)
18768 {
18769- int r;
18770+ int r, i;
18771
18772 vcpu_load(vcpu);
18773 r = -E2BIG;
18774 if (cpuid->nent < vcpu->arch.cpuid_nent)
18775 goto out;
18776 r = -EFAULT;
18777- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18778- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18779+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18780 goto out;
18781+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18782+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18783+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18784+ goto out;
18785+ }
18786 return 0;
18787
18788 out:
18789@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18790 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18791 struct kvm_interrupt *irq)
18792 {
18793- if (irq->irq < 0 || irq->irq >= 256)
18794+ if (irq->irq >= 256)
18795 return -EINVAL;
18796 if (irqchip_in_kernel(vcpu->kvm))
18797 return -ENXIO;
18798@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18799 .notifier_call = kvmclock_cpufreq_notifier
18800 };
18801
18802-int kvm_arch_init(void *opaque)
18803+int kvm_arch_init(const void *opaque)
18804 {
18805 int r, cpu;
18806- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18807+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18808
18809 if (kvm_x86_ops) {
18810 printk(KERN_ERR "kvm: already loaded the other module\n");
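
Both cpuid ioctl helpers above replace one large copy_from_user()/copy_to_user() with a single access_ok() check followed by fixed-size __copy_from_user()/__copy_to_user() calls per entry; elsewhere this patch stops exporting the bulk copy routines and adds size checking to them, so keeping each individual copy a compile-time-constant size appears to be the motivation. The pattern in isolation, with hypothetical names:

	/* sketch: one access_ok() for the whole user array, then constant-size copies */
	if (!access_ok(VERIFY_READ, user_entries, nent * sizeof(*user_entries)))
		return -EFAULT;
	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 tmp;

		if (__copy_from_user(&tmp, user_entries + i, sizeof(tmp)))
			return -EFAULT;
		kernel_entries[i] = tmp;
	}
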
18811diff -urNp linux-2.6.32.45/arch/x86/lguest/boot.c linux-2.6.32.45/arch/x86/lguest/boot.c
18812--- linux-2.6.32.45/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18813+++ linux-2.6.32.45/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18814@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18815 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18816 * Launcher to reboot us.
18817 */
18818-static void lguest_restart(char *reason)
18819+static __noreturn void lguest_restart(char *reason)
18820 {
18821 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18822+ BUG();
18823 }
18824
18825 /*G:050
18826diff -urNp linux-2.6.32.45/arch/x86/lib/atomic64_32.c linux-2.6.32.45/arch/x86/lib/atomic64_32.c
18827--- linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18828+++ linux-2.6.32.45/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18829@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18830 }
18831 EXPORT_SYMBOL(atomic64_cmpxchg);
18832
18833+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18834+{
18835+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18836+}
18837+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18838+
18839 /**
18840 * atomic64_xchg - xchg atomic64 variable
18841 * @ptr: pointer to type atomic64_t
18842@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18843 EXPORT_SYMBOL(atomic64_xchg);
18844
18845 /**
18846+ * atomic64_xchg_unchecked - xchg atomic64 variable
18847+ * @ptr: pointer to type atomic64_unchecked_t
18848+ * @new_val: value to assign
18849+ *
18850+ * Atomically xchgs the value of @ptr to @new_val and returns
18851+ * the old value.
18852+ */
18853+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18854+{
18855+ /*
18856+ * Try first with a (possibly incorrect) assumption about
18857+ * what we have there. We'll do two loops most likely,
18858+ * but we'll get an ownership MESI transaction straight away
18859+ * instead of a read transaction followed by a
18860+ * flush-for-ownership transaction:
18861+ */
18862+ u64 old_val, real_val = 0;
18863+
18864+ do {
18865+ old_val = real_val;
18866+
18867+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18868+
18869+ } while (real_val != old_val);
18870+
18871+ return old_val;
18872+}
18873+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18874+
18875+/**
18876 * atomic64_set - set atomic64 variable
18877 * @ptr: pointer to type atomic64_t
18878 * @new_val: value to assign
18879@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18880 EXPORT_SYMBOL(atomic64_set);
18881
18882 /**
18883-EXPORT_SYMBOL(atomic64_read);
18884+ * atomic64_unchecked_set - set atomic64 variable
18885+ * @ptr: pointer to type atomic64_unchecked_t
18886+ * @new_val: value to assign
18887+ *
18888+ * Atomically sets the value of @ptr to @new_val.
18889+ */
18890+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18891+{
18892+ atomic64_xchg_unchecked(ptr, new_val);
18893+}
18894+EXPORT_SYMBOL(atomic64_set_unchecked);
18895+
18896+/**
18897 * atomic64_add_return - add and return
18898 * @delta: integer value to add
18899 * @ptr: pointer to type atomic64_t
18900@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18901 }
18902 EXPORT_SYMBOL(atomic64_add_return);
18903
18904+/**
18905+ * atomic64_add_return_unchecked - add and return
18906+ * @delta: integer value to add
18907+ * @ptr: pointer to type atomic64_unchecked_t
18908+ *
18909+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18910+ */
18911+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18912+{
18913+ /*
18914+ * Try first with a (possibly incorrect) assumption about
18915+ * what we have there. We'll do two loops most likely,
18916+ * but we'll get an ownership MESI transaction straight away
18917+ * instead of a read transaction followed by a
18918+ * flush-for-ownership transaction:
18919+ */
18920+ u64 old_val, new_val, real_val = 0;
18921+
18922+ do {
18923+ old_val = real_val;
18924+ new_val = old_val + delta;
18925+
18926+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18927+
18928+ } while (real_val != old_val);
18929+
18930+ return new_val;
18931+}
18932+EXPORT_SYMBOL(atomic64_add_return_unchecked);
18933+
18934 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18935 {
18936 return atomic64_add_return(-delta, ptr);
18937 }
18938 EXPORT_SYMBOL(atomic64_sub_return);
18939
18940+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18941+{
18942+ return atomic64_add_return_unchecked(-delta, ptr);
18943+}
18944+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18945+
18946 u64 atomic64_inc_return(atomic64_t *ptr)
18947 {
18948 return atomic64_add_return(1, ptr);
18949 }
18950 EXPORT_SYMBOL(atomic64_inc_return);
18951
18952+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18953+{
18954+ return atomic64_add_return_unchecked(1, ptr);
18955+}
18956+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18957+
18958 u64 atomic64_dec_return(atomic64_t *ptr)
18959 {
18960 return atomic64_sub_return(1, ptr);
18961 }
18962 EXPORT_SYMBOL(atomic64_dec_return);
18963
18964+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18965+{
18966+ return atomic64_sub_return_unchecked(1, ptr);
18967+}
18968+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18969+
18970 /**
18971 * atomic64_add - add integer to atomic64 variable
18972 * @delta: integer value to add
18973@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18974 EXPORT_SYMBOL(atomic64_add);
18975
18976 /**
18977+ * atomic64_add_unchecked - add integer to atomic64 variable
18978+ * @delta: integer value to add
18979+ * @ptr: pointer to type atomic64_unchecked_t
18980+ *
18981+ * Atomically adds @delta to @ptr.
18982+ */
18983+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18984+{
18985+ atomic64_add_return_unchecked(delta, ptr);
18986+}
18987+EXPORT_SYMBOL(atomic64_add_unchecked);
18988+
18989+/**
18990 * atomic64_sub - subtract the atomic64 variable
18991 * @delta: integer value to subtract
18992 * @ptr: pointer to type atomic64_t
18993@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18994 EXPORT_SYMBOL(atomic64_sub);
18995
18996 /**
18997+ * atomic64_sub_unchecked - subtract the atomic64 variable
18998+ * @delta: integer value to subtract
18999+ * @ptr: pointer to type atomic64_unchecked_t
19000+ *
19001+ * Atomically subtracts @delta from @ptr.
19002+ */
19003+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
19004+{
19005+ atomic64_add_unchecked(-delta, ptr);
19006+}
19007+EXPORT_SYMBOL(atomic64_sub_unchecked);
19008+
19009+/**
19010 * atomic64_sub_and_test - subtract value from variable and test result
19011 * @delta: integer value to subtract
19012 * @ptr: pointer to type atomic64_t
19013@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
19014 EXPORT_SYMBOL(atomic64_inc);
19015
19016 /**
19017+ * atomic64_inc_unchecked - increment atomic64 variable
19018+ * @ptr: pointer to type atomic64_unchecked_t
19019+ *
19020+ * Atomically increments @ptr by 1.
19021+ */
19022+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
19023+{
19024+ atomic64_add_unchecked(1, ptr);
19025+}
19026+EXPORT_SYMBOL(atomic64_inc_unchecked);
19027+
19028+/**
19029 * atomic64_dec - decrement atomic64 variable
19030 * @ptr: pointer to type atomic64_t
19031 *
19032@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
19033 EXPORT_SYMBOL(atomic64_dec);
19034
19035 /**
19036+ * atomic64_dec_unchecked - decrement atomic64 variable
19037+ * @ptr: pointer to type atomic64_unchecked_t
19038+ *
19039+ * Atomically decrements @ptr by 1.
19040+ */
19041+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
19042+{
19043+ atomic64_sub_unchecked(1, ptr);
19044+}
19045+EXPORT_SYMBOL(atomic64_dec_unchecked);
19046+
19047+/**
19048 * atomic64_dec_and_test - decrement and test
19049 * @ptr: pointer to type atomic64_t
19050 *
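
On 32-bit x86 the atomic64 operations are out-of-line library functions built on cmpxchg8b, so every checked operation used by converted call sites needs the explicit _unchecked twin added and exported above. A hypothetical caller, for a counter where wrap-around is harmless:

	static atomic64_unchecked_t rx_bytes;	/* hypothetical statistics counter */

	static void account_rx(unsigned int len)
	{
		atomic64_add_unchecked(len, &rx_bytes);
	}
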
19051diff -urNp linux-2.6.32.45/arch/x86/lib/checksum_32.S linux-2.6.32.45/arch/x86/lib/checksum_32.S
19052--- linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
19053+++ linux-2.6.32.45/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
19054@@ -28,7 +28,8 @@
19055 #include <linux/linkage.h>
19056 #include <asm/dwarf2.h>
19057 #include <asm/errno.h>
19058-
19059+#include <asm/segment.h>
19060+
19061 /*
19062 * computes a partial checksum, e.g. for TCP/UDP fragments
19063 */
19064@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
19065
19066 #define ARGBASE 16
19067 #define FP 12
19068-
19069-ENTRY(csum_partial_copy_generic)
19070+
19071+ENTRY(csum_partial_copy_generic_to_user)
19072 CFI_STARTPROC
19073+
19074+#ifdef CONFIG_PAX_MEMORY_UDEREF
19075+ pushl %gs
19076+ CFI_ADJUST_CFA_OFFSET 4
19077+ popl %es
19078+ CFI_ADJUST_CFA_OFFSET -4
19079+ jmp csum_partial_copy_generic
19080+#endif
19081+
19082+ENTRY(csum_partial_copy_generic_from_user)
19083+
19084+#ifdef CONFIG_PAX_MEMORY_UDEREF
19085+ pushl %gs
19086+ CFI_ADJUST_CFA_OFFSET 4
19087+ popl %ds
19088+ CFI_ADJUST_CFA_OFFSET -4
19089+#endif
19090+
19091+ENTRY(csum_partial_copy_generic)
19092 subl $4,%esp
19093 CFI_ADJUST_CFA_OFFSET 4
19094 pushl %edi
19095@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
19096 jmp 4f
19097 SRC(1: movw (%esi), %bx )
19098 addl $2, %esi
19099-DST( movw %bx, (%edi) )
19100+DST( movw %bx, %es:(%edi) )
19101 addl $2, %edi
19102 addw %bx, %ax
19103 adcl $0, %eax
19104@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
19105 SRC(1: movl (%esi), %ebx )
19106 SRC( movl 4(%esi), %edx )
19107 adcl %ebx, %eax
19108-DST( movl %ebx, (%edi) )
19109+DST( movl %ebx, %es:(%edi) )
19110 adcl %edx, %eax
19111-DST( movl %edx, 4(%edi) )
19112+DST( movl %edx, %es:4(%edi) )
19113
19114 SRC( movl 8(%esi), %ebx )
19115 SRC( movl 12(%esi), %edx )
19116 adcl %ebx, %eax
19117-DST( movl %ebx, 8(%edi) )
19118+DST( movl %ebx, %es:8(%edi) )
19119 adcl %edx, %eax
19120-DST( movl %edx, 12(%edi) )
19121+DST( movl %edx, %es:12(%edi) )
19122
19123 SRC( movl 16(%esi), %ebx )
19124 SRC( movl 20(%esi), %edx )
19125 adcl %ebx, %eax
19126-DST( movl %ebx, 16(%edi) )
19127+DST( movl %ebx, %es:16(%edi) )
19128 adcl %edx, %eax
19129-DST( movl %edx, 20(%edi) )
19130+DST( movl %edx, %es:20(%edi) )
19131
19132 SRC( movl 24(%esi), %ebx )
19133 SRC( movl 28(%esi), %edx )
19134 adcl %ebx, %eax
19135-DST( movl %ebx, 24(%edi) )
19136+DST( movl %ebx, %es:24(%edi) )
19137 adcl %edx, %eax
19138-DST( movl %edx, 28(%edi) )
19139+DST( movl %edx, %es:28(%edi) )
19140
19141 lea 32(%esi), %esi
19142 lea 32(%edi), %edi
19143@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19144 shrl $2, %edx # This clears CF
19145 SRC(3: movl (%esi), %ebx )
19146 adcl %ebx, %eax
19147-DST( movl %ebx, (%edi) )
19148+DST( movl %ebx, %es:(%edi) )
19149 lea 4(%esi), %esi
19150 lea 4(%edi), %edi
19151 dec %edx
19152@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19153 jb 5f
19154 SRC( movw (%esi), %cx )
19155 leal 2(%esi), %esi
19156-DST( movw %cx, (%edi) )
19157+DST( movw %cx, %es:(%edi) )
19158 leal 2(%edi), %edi
19159 je 6f
19160 shll $16,%ecx
19161 SRC(5: movb (%esi), %cl )
19162-DST( movb %cl, (%edi) )
19163+DST( movb %cl, %es:(%edi) )
19164 6: addl %ecx, %eax
19165 adcl $0, %eax
19166 7:
19167@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19168
19169 6001:
19170 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19171- movl $-EFAULT, (%ebx)
19172+ movl $-EFAULT, %ss:(%ebx)
19173
19174 # zero the complete destination - computing the rest
19175 # is too much work
19176@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19177
19178 6002:
19179 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19180- movl $-EFAULT,(%ebx)
19181+ movl $-EFAULT,%ss:(%ebx)
19182 jmp 5000b
19183
19184 .previous
19185
19186+ pushl %ss
19187+ CFI_ADJUST_CFA_OFFSET 4
19188+ popl %ds
19189+ CFI_ADJUST_CFA_OFFSET -4
19190+ pushl %ss
19191+ CFI_ADJUST_CFA_OFFSET 4
19192+ popl %es
19193+ CFI_ADJUST_CFA_OFFSET -4
19194 popl %ebx
19195 CFI_ADJUST_CFA_OFFSET -4
19196 CFI_RESTORE ebx
19197@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19198 CFI_ADJUST_CFA_OFFSET -4
19199 ret
19200 CFI_ENDPROC
19201-ENDPROC(csum_partial_copy_generic)
19202+ENDPROC(csum_partial_copy_generic_to_user)
19203
19204 #else
19205
19206 /* Version for PentiumII/PPro */
19207
19208 #define ROUND1(x) \
19209+ nop; nop; nop; \
19210 SRC(movl x(%esi), %ebx ) ; \
19211 addl %ebx, %eax ; \
19212- DST(movl %ebx, x(%edi) ) ;
19213+ DST(movl %ebx, %es:x(%edi)) ;
19214
19215 #define ROUND(x) \
19216+ nop; nop; nop; \
19217 SRC(movl x(%esi), %ebx ) ; \
19218 adcl %ebx, %eax ; \
19219- DST(movl %ebx, x(%edi) ) ;
19220+ DST(movl %ebx, %es:x(%edi)) ;
19221
19222 #define ARGBASE 12
19223-
19224-ENTRY(csum_partial_copy_generic)
19225+
19226+ENTRY(csum_partial_copy_generic_to_user)
19227 CFI_STARTPROC
19228+
19229+#ifdef CONFIG_PAX_MEMORY_UDEREF
19230+ pushl %gs
19231+ CFI_ADJUST_CFA_OFFSET 4
19232+ popl %es
19233+ CFI_ADJUST_CFA_OFFSET -4
19234+ jmp csum_partial_copy_generic
19235+#endif
19236+
19237+ENTRY(csum_partial_copy_generic_from_user)
19238+
19239+#ifdef CONFIG_PAX_MEMORY_UDEREF
19240+ pushl %gs
19241+ CFI_ADJUST_CFA_OFFSET 4
19242+ popl %ds
19243+ CFI_ADJUST_CFA_OFFSET -4
19244+#endif
19245+
19246+ENTRY(csum_partial_copy_generic)
19247 pushl %ebx
19248 CFI_ADJUST_CFA_OFFSET 4
19249 CFI_REL_OFFSET ebx, 0
19250@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19251 subl %ebx, %edi
19252 lea -1(%esi),%edx
19253 andl $-32,%edx
19254- lea 3f(%ebx,%ebx), %ebx
19255+ lea 3f(%ebx,%ebx,2), %ebx
19256 testl %esi, %esi
19257 jmp *%ebx
19258 1: addl $64,%esi
19259@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19260 jb 5f
19261 SRC( movw (%esi), %dx )
19262 leal 2(%esi), %esi
19263-DST( movw %dx, (%edi) )
19264+DST( movw %dx, %es:(%edi) )
19265 leal 2(%edi), %edi
19266 je 6f
19267 shll $16,%edx
19268 5:
19269 SRC( movb (%esi), %dl )
19270-DST( movb %dl, (%edi) )
19271+DST( movb %dl, %es:(%edi) )
19272 6: addl %edx, %eax
19273 adcl $0, %eax
19274 7:
19275 .section .fixup, "ax"
19276 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19277- movl $-EFAULT, (%ebx)
19278+ movl $-EFAULT, %ss:(%ebx)
19279 # zero the complete destination (computing the rest is too much work)
19280 movl ARGBASE+8(%esp),%edi # dst
19281 movl ARGBASE+12(%esp),%ecx # len
19282@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19283 rep; stosb
19284 jmp 7b
19285 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19286- movl $-EFAULT, (%ebx)
19287+ movl $-EFAULT, %ss:(%ebx)
19288 jmp 7b
19289 .previous
19290
19291+#ifdef CONFIG_PAX_MEMORY_UDEREF
19292+ pushl %ss
19293+ CFI_ADJUST_CFA_OFFSET 4
19294+ popl %ds
19295+ CFI_ADJUST_CFA_OFFSET -4
19296+ pushl %ss
19297+ CFI_ADJUST_CFA_OFFSET 4
19298+ popl %es
19299+ CFI_ADJUST_CFA_OFFSET -4
19300+#endif
19301+
19302 popl %esi
19303 CFI_ADJUST_CFA_OFFSET -4
19304 CFI_RESTORE esi
19305@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19306 CFI_RESTORE ebx
19307 ret
19308 CFI_ENDPROC
19309-ENDPROC(csum_partial_copy_generic)
19310+ENDPROC(csum_partial_copy_generic_to_user)
19311
19312 #undef ROUND
19313 #undef ROUND1
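
Under CONFIG_PAX_MEMORY_UDEREF on i386 user space is only reachable through the segment kept in %gs, so the checksum-and-copy routine above gains two entry points: csum_partial_copy_generic_to_user loads that segment into %es (all DST() stores become %es-relative) and csum_partial_copy_generic_from_user loads it into %ds, with both falling through to the unchanged generic body; the exit path restores flat %ds/%es from %ss. A minimal sketch of how a C caller could select the right entry point, assuming prototypes shaped like csum_partial_copy_generic in the hunk header above (the wrapper name and its use here are illustrative, not taken from the patch):

	unsigned int csum_partial_copy_generic_to_user(const char *src, char *dst,
						       int len, int sum,
						       int *src_err_ptr, int *dst_err_ptr);

	/* illustrative wrapper, not part of the patch: the destination is a user
	 * pointer, so use the entry point that points %es at the user segment */
	static inline unsigned int
	csum_and_copy_to_user_sketch(const char *src, char __user *dst, int len,
				     int sum, int *err_ptr)
	{
		return csum_partial_copy_generic_to_user(src, (__force char *)dst,
							 len, sum, NULL, err_ptr);
	}
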
19314diff -urNp linux-2.6.32.45/arch/x86/lib/clear_page_64.S linux-2.6.32.45/arch/x86/lib/clear_page_64.S
19315--- linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19316+++ linux-2.6.32.45/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19317@@ -43,7 +43,7 @@ ENDPROC(clear_page)
19318
19319 #include <asm/cpufeature.h>
19320
19321- .section .altinstr_replacement,"ax"
19322+ .section .altinstr_replacement,"a"
19323 1: .byte 0xeb /* jmp <disp8> */
19324 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19325 2:
19326diff -urNp linux-2.6.32.45/arch/x86/lib/copy_page_64.S linux-2.6.32.45/arch/x86/lib/copy_page_64.S
19327--- linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19328+++ linux-2.6.32.45/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19329@@ -104,7 +104,7 @@ ENDPROC(copy_page)
19330
19331 #include <asm/cpufeature.h>
19332
19333- .section .altinstr_replacement,"ax"
19334+ .section .altinstr_replacement,"a"
19335 1: .byte 0xeb /* jmp <disp8> */
19336 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19337 2:
19338diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_64.S linux-2.6.32.45/arch/x86/lib/copy_user_64.S
19339--- linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19340+++ linux-2.6.32.45/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19341@@ -15,13 +15,14 @@
19342 #include <asm/asm-offsets.h>
19343 #include <asm/thread_info.h>
19344 #include <asm/cpufeature.h>
19345+#include <asm/pgtable.h>
19346
19347 .macro ALTERNATIVE_JUMP feature,orig,alt
19348 0:
19349 .byte 0xe9 /* 32bit jump */
19350 .long \orig-1f /* by default jump to orig */
19351 1:
19352- .section .altinstr_replacement,"ax"
19353+ .section .altinstr_replacement,"a"
19354 2: .byte 0xe9 /* near jump with 32bit immediate */
19355 .long \alt-1b /* offset */ /* or alternatively to alt */
19356 .previous
19357@@ -64,49 +65,19 @@
19358 #endif
19359 .endm
19360
19361-/* Standard copy_to_user with segment limit checking */
19362-ENTRY(copy_to_user)
19363- CFI_STARTPROC
19364- GET_THREAD_INFO(%rax)
19365- movq %rdi,%rcx
19366- addq %rdx,%rcx
19367- jc bad_to_user
19368- cmpq TI_addr_limit(%rax),%rcx
19369- ja bad_to_user
19370- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19371- CFI_ENDPROC
19372-ENDPROC(copy_to_user)
19373-
19374-/* Standard copy_from_user with segment limit checking */
19375-ENTRY(copy_from_user)
19376- CFI_STARTPROC
19377- GET_THREAD_INFO(%rax)
19378- movq %rsi,%rcx
19379- addq %rdx,%rcx
19380- jc bad_from_user
19381- cmpq TI_addr_limit(%rax),%rcx
19382- ja bad_from_user
19383- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19384- CFI_ENDPROC
19385-ENDPROC(copy_from_user)
19386-
19387 ENTRY(copy_user_generic)
19388 CFI_STARTPROC
19389 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19390 CFI_ENDPROC
19391 ENDPROC(copy_user_generic)
19392
19393-ENTRY(__copy_from_user_inatomic)
19394- CFI_STARTPROC
19395- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19396- CFI_ENDPROC
19397-ENDPROC(__copy_from_user_inatomic)
19398-
19399 .section .fixup,"ax"
19400 /* must zero dest */
19401 ENTRY(bad_from_user)
19402 bad_from_user:
19403 CFI_STARTPROC
19404+ testl %edx,%edx
19405+ js bad_to_user
19406 movl %edx,%ecx
19407 xorl %eax,%eax
19408 rep
19409diff -urNp linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S
19410--- linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19411+++ linux-2.6.32.45/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19412@@ -14,6 +14,7 @@
19413 #include <asm/current.h>
19414 #include <asm/asm-offsets.h>
19415 #include <asm/thread_info.h>
19416+#include <asm/pgtable.h>
19417
19418 .macro ALIGN_DESTINATION
19419 #ifdef FIX_ALIGNMENT
19420@@ -50,6 +51,15 @@
19421 */
19422 ENTRY(__copy_user_nocache)
19423 CFI_STARTPROC
19424+
19425+#ifdef CONFIG_PAX_MEMORY_UDEREF
19426+ mov $PAX_USER_SHADOW_BASE,%rcx
19427+ cmp %rcx,%rsi
19428+ jae 1f
19429+ add %rcx,%rsi
19430+1:
19431+#endif
19432+
19433 cmpl $8,%edx
19434 jb 20f /* less then 8 bytes, go to byte copy loop */
19435 ALIGN_DESTINATION
19436diff -urNp linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c
19437--- linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19438+++ linux-2.6.32.45/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19439@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19440 len -= 2;
19441 }
19442 }
19443+
19444+#ifdef CONFIG_PAX_MEMORY_UDEREF
19445+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19446+ src += PAX_USER_SHADOW_BASE;
19447+#endif
19448+
19449 isum = csum_partial_copy_generic((__force const void *)src,
19450 dst, len, isum, errp, NULL);
19451 if (unlikely(*errp))
19452@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19453 }
19454
19455 *errp = 0;
19456+
19457+#ifdef CONFIG_PAX_MEMORY_UDEREF
19458+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19459+ dst += PAX_USER_SHADOW_BASE;
19460+#endif
19461+
19462 return csum_partial_copy_generic(src, (void __force *)dst,
19463 len, isum, NULL, errp);
19464 }
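
The csum-wrappers_64.c hunks above show the amd64 UDEREF convention that recurs in getuser.S, putuser.S, copy_user_nocache_64.S and usercopy_64.c below: user space is additionally mapped at PAX_USER_SHADOW_BASE, and a user pointer still carrying its low address is rebased into that shadow region before it is handed to code that only dereferences kernel addresses. A minimal C sketch of the idiom (the helper name is made up for illustration; PAX_USER_SHADOW_BASE is provided by asm/pgtable.h in the patched tree):

	static inline const void *pax_shadow_ptr(const void __user *ptr)
	{
	#ifdef CONFIG_PAX_MEMORY_UDEREF
		/* still a low user address: rebase into the shadow mapping */
		if ((unsigned long)ptr < PAX_USER_SHADOW_BASE)
			ptr += PAX_USER_SHADOW_BASE;
	#endif
		return (__force const void *)ptr;
	}
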
19465diff -urNp linux-2.6.32.45/arch/x86/lib/getuser.S linux-2.6.32.45/arch/x86/lib/getuser.S
19466--- linux-2.6.32.45/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19467+++ linux-2.6.32.45/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19468@@ -33,14 +33,35 @@
19469 #include <asm/asm-offsets.h>
19470 #include <asm/thread_info.h>
19471 #include <asm/asm.h>
19472+#include <asm/segment.h>
19473+#include <asm/pgtable.h>
19474+
19475+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19476+#define __copyuser_seg gs;
19477+#else
19478+#define __copyuser_seg
19479+#endif
19480
19481 .text
19482 ENTRY(__get_user_1)
19483 CFI_STARTPROC
19484+
19485+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19486 GET_THREAD_INFO(%_ASM_DX)
19487 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19488 jae bad_get_user
19489-1: movzb (%_ASM_AX),%edx
19490+
19491+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19492+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19493+ cmp %_ASM_DX,%_ASM_AX
19494+ jae 1234f
19495+ add %_ASM_DX,%_ASM_AX
19496+1234:
19497+#endif
19498+
19499+#endif
19500+
19501+1: __copyuser_seg movzb (%_ASM_AX),%edx
19502 xor %eax,%eax
19503 ret
19504 CFI_ENDPROC
19505@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19506 ENTRY(__get_user_2)
19507 CFI_STARTPROC
19508 add $1,%_ASM_AX
19509+
19510+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19511 jc bad_get_user
19512 GET_THREAD_INFO(%_ASM_DX)
19513 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19514 jae bad_get_user
19515-2: movzwl -1(%_ASM_AX),%edx
19516+
19517+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19518+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19519+ cmp %_ASM_DX,%_ASM_AX
19520+ jae 1234f
19521+ add %_ASM_DX,%_ASM_AX
19522+1234:
19523+#endif
19524+
19525+#endif
19526+
19527+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19528 xor %eax,%eax
19529 ret
19530 CFI_ENDPROC
19531@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19532 ENTRY(__get_user_4)
19533 CFI_STARTPROC
19534 add $3,%_ASM_AX
19535+
19536+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19537 jc bad_get_user
19538 GET_THREAD_INFO(%_ASM_DX)
19539 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19540 jae bad_get_user
19541-3: mov -3(%_ASM_AX),%edx
19542+
19543+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19544+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19545+ cmp %_ASM_DX,%_ASM_AX
19546+ jae 1234f
19547+ add %_ASM_DX,%_ASM_AX
19548+1234:
19549+#endif
19550+
19551+#endif
19552+
19553+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19554 xor %eax,%eax
19555 ret
19556 CFI_ENDPROC
19557@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19558 GET_THREAD_INFO(%_ASM_DX)
19559 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19560 jae bad_get_user
19561+
19562+#ifdef CONFIG_PAX_MEMORY_UDEREF
19563+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19564+ cmp %_ASM_DX,%_ASM_AX
19565+ jae 1234f
19566+ add %_ASM_DX,%_ASM_AX
19567+1234:
19568+#endif
19569+
19570 4: movq -7(%_ASM_AX),%_ASM_DX
19571 xor %eax,%eax
19572 ret
19573diff -urNp linux-2.6.32.45/arch/x86/lib/memcpy_64.S linux-2.6.32.45/arch/x86/lib/memcpy_64.S
19574--- linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19575+++ linux-2.6.32.45/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19576@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19577 * It is also a lot simpler. Use this when possible:
19578 */
19579
19580- .section .altinstr_replacement, "ax"
19581+ .section .altinstr_replacement, "a"
19582 1: .byte 0xeb /* jmp <disp8> */
19583 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19584 2:
19585diff -urNp linux-2.6.32.45/arch/x86/lib/memset_64.S linux-2.6.32.45/arch/x86/lib/memset_64.S
19586--- linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19587+++ linux-2.6.32.45/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19588@@ -118,7 +118,7 @@ ENDPROC(__memset)
19589
19590 #include <asm/cpufeature.h>
19591
19592- .section .altinstr_replacement,"ax"
19593+ .section .altinstr_replacement,"a"
19594 1: .byte 0xeb /* jmp <disp8> */
19595 .byte (memset_c - memset) - (2f - 1b) /* offset */
19596 2:
19597diff -urNp linux-2.6.32.45/arch/x86/lib/mmx_32.c linux-2.6.32.45/arch/x86/lib/mmx_32.c
19598--- linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19599+++ linux-2.6.32.45/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19600@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19601 {
19602 void *p;
19603 int i;
19604+ unsigned long cr0;
19605
19606 if (unlikely(in_interrupt()))
19607 return __memcpy(to, from, len);
19608@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19609 kernel_fpu_begin();
19610
19611 __asm__ __volatile__ (
19612- "1: prefetch (%0)\n" /* This set is 28 bytes */
19613- " prefetch 64(%0)\n"
19614- " prefetch 128(%0)\n"
19615- " prefetch 192(%0)\n"
19616- " prefetch 256(%0)\n"
19617+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19618+ " prefetch 64(%1)\n"
19619+ " prefetch 128(%1)\n"
19620+ " prefetch 192(%1)\n"
19621+ " prefetch 256(%1)\n"
19622 "2: \n"
19623 ".section .fixup, \"ax\"\n"
19624- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19625+ "3: \n"
19626+
19627+#ifdef CONFIG_PAX_KERNEXEC
19628+ " movl %%cr0, %0\n"
19629+ " movl %0, %%eax\n"
19630+ " andl $0xFFFEFFFF, %%eax\n"
19631+ " movl %%eax, %%cr0\n"
19632+#endif
19633+
19634+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19635+
19636+#ifdef CONFIG_PAX_KERNEXEC
19637+ " movl %0, %%cr0\n"
19638+#endif
19639+
19640 " jmp 2b\n"
19641 ".previous\n"
19642 _ASM_EXTABLE(1b, 3b)
19643- : : "r" (from));
19644+ : "=&r" (cr0) : "r" (from) : "ax");
19645
19646 for ( ; i > 5; i--) {
19647 __asm__ __volatile__ (
19648- "1: prefetch 320(%0)\n"
19649- "2: movq (%0), %%mm0\n"
19650- " movq 8(%0), %%mm1\n"
19651- " movq 16(%0), %%mm2\n"
19652- " movq 24(%0), %%mm3\n"
19653- " movq %%mm0, (%1)\n"
19654- " movq %%mm1, 8(%1)\n"
19655- " movq %%mm2, 16(%1)\n"
19656- " movq %%mm3, 24(%1)\n"
19657- " movq 32(%0), %%mm0\n"
19658- " movq 40(%0), %%mm1\n"
19659- " movq 48(%0), %%mm2\n"
19660- " movq 56(%0), %%mm3\n"
19661- " movq %%mm0, 32(%1)\n"
19662- " movq %%mm1, 40(%1)\n"
19663- " movq %%mm2, 48(%1)\n"
19664- " movq %%mm3, 56(%1)\n"
19665+ "1: prefetch 320(%1)\n"
19666+ "2: movq (%1), %%mm0\n"
19667+ " movq 8(%1), %%mm1\n"
19668+ " movq 16(%1), %%mm2\n"
19669+ " movq 24(%1), %%mm3\n"
19670+ " movq %%mm0, (%2)\n"
19671+ " movq %%mm1, 8(%2)\n"
19672+ " movq %%mm2, 16(%2)\n"
19673+ " movq %%mm3, 24(%2)\n"
19674+ " movq 32(%1), %%mm0\n"
19675+ " movq 40(%1), %%mm1\n"
19676+ " movq 48(%1), %%mm2\n"
19677+ " movq 56(%1), %%mm3\n"
19678+ " movq %%mm0, 32(%2)\n"
19679+ " movq %%mm1, 40(%2)\n"
19680+ " movq %%mm2, 48(%2)\n"
19681+ " movq %%mm3, 56(%2)\n"
19682 ".section .fixup, \"ax\"\n"
19683- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19684+ "3:\n"
19685+
19686+#ifdef CONFIG_PAX_KERNEXEC
19687+ " movl %%cr0, %0\n"
19688+ " movl %0, %%eax\n"
19689+ " andl $0xFFFEFFFF, %%eax\n"
19690+ " movl %%eax, %%cr0\n"
19691+#endif
19692+
19693+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19694+
19695+#ifdef CONFIG_PAX_KERNEXEC
19696+ " movl %0, %%cr0\n"
19697+#endif
19698+
19699 " jmp 2b\n"
19700 ".previous\n"
19701 _ASM_EXTABLE(1b, 3b)
19702- : : "r" (from), "r" (to) : "memory");
19703+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19704
19705 from += 64;
19706 to += 64;
19707@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19708 static void fast_copy_page(void *to, void *from)
19709 {
19710 int i;
19711+ unsigned long cr0;
19712
19713 kernel_fpu_begin();
19714
19715@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19716 * but that is for later. -AV
19717 */
19718 __asm__ __volatile__(
19719- "1: prefetch (%0)\n"
19720- " prefetch 64(%0)\n"
19721- " prefetch 128(%0)\n"
19722- " prefetch 192(%0)\n"
19723- " prefetch 256(%0)\n"
19724+ "1: prefetch (%1)\n"
19725+ " prefetch 64(%1)\n"
19726+ " prefetch 128(%1)\n"
19727+ " prefetch 192(%1)\n"
19728+ " prefetch 256(%1)\n"
19729 "2: \n"
19730 ".section .fixup, \"ax\"\n"
19731- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19732+ "3: \n"
19733+
19734+#ifdef CONFIG_PAX_KERNEXEC
19735+ " movl %%cr0, %0\n"
19736+ " movl %0, %%eax\n"
19737+ " andl $0xFFFEFFFF, %%eax\n"
19738+ " movl %%eax, %%cr0\n"
19739+#endif
19740+
19741+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19742+
19743+#ifdef CONFIG_PAX_KERNEXEC
19744+ " movl %0, %%cr0\n"
19745+#endif
19746+
19747 " jmp 2b\n"
19748 ".previous\n"
19749- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19750+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19751
19752 for (i = 0; i < (4096-320)/64; i++) {
19753 __asm__ __volatile__ (
19754- "1: prefetch 320(%0)\n"
19755- "2: movq (%0), %%mm0\n"
19756- " movntq %%mm0, (%1)\n"
19757- " movq 8(%0), %%mm1\n"
19758- " movntq %%mm1, 8(%1)\n"
19759- " movq 16(%0), %%mm2\n"
19760- " movntq %%mm2, 16(%1)\n"
19761- " movq 24(%0), %%mm3\n"
19762- " movntq %%mm3, 24(%1)\n"
19763- " movq 32(%0), %%mm4\n"
19764- " movntq %%mm4, 32(%1)\n"
19765- " movq 40(%0), %%mm5\n"
19766- " movntq %%mm5, 40(%1)\n"
19767- " movq 48(%0), %%mm6\n"
19768- " movntq %%mm6, 48(%1)\n"
19769- " movq 56(%0), %%mm7\n"
19770- " movntq %%mm7, 56(%1)\n"
19771+ "1: prefetch 320(%1)\n"
19772+ "2: movq (%1), %%mm0\n"
19773+ " movntq %%mm0, (%2)\n"
19774+ " movq 8(%1), %%mm1\n"
19775+ " movntq %%mm1, 8(%2)\n"
19776+ " movq 16(%1), %%mm2\n"
19777+ " movntq %%mm2, 16(%2)\n"
19778+ " movq 24(%1), %%mm3\n"
19779+ " movntq %%mm3, 24(%2)\n"
19780+ " movq 32(%1), %%mm4\n"
19781+ " movntq %%mm4, 32(%2)\n"
19782+ " movq 40(%1), %%mm5\n"
19783+ " movntq %%mm5, 40(%2)\n"
19784+ " movq 48(%1), %%mm6\n"
19785+ " movntq %%mm6, 48(%2)\n"
19786+ " movq 56(%1), %%mm7\n"
19787+ " movntq %%mm7, 56(%2)\n"
19788 ".section .fixup, \"ax\"\n"
19789- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19790+ "3:\n"
19791+
19792+#ifdef CONFIG_PAX_KERNEXEC
19793+ " movl %%cr0, %0\n"
19794+ " movl %0, %%eax\n"
19795+ " andl $0xFFFEFFFF, %%eax\n"
19796+ " movl %%eax, %%cr0\n"
19797+#endif
19798+
19799+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19800+
19801+#ifdef CONFIG_PAX_KERNEXEC
19802+ " movl %0, %%cr0\n"
19803+#endif
19804+
19805 " jmp 2b\n"
19806 ".previous\n"
19807- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19808+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19809
19810 from += 64;
19811 to += 64;
19812@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19813 static void fast_copy_page(void *to, void *from)
19814 {
19815 int i;
19816+ unsigned long cr0;
19817
19818 kernel_fpu_begin();
19819
19820 __asm__ __volatile__ (
19821- "1: prefetch (%0)\n"
19822- " prefetch 64(%0)\n"
19823- " prefetch 128(%0)\n"
19824- " prefetch 192(%0)\n"
19825- " prefetch 256(%0)\n"
19826+ "1: prefetch (%1)\n"
19827+ " prefetch 64(%1)\n"
19828+ " prefetch 128(%1)\n"
19829+ " prefetch 192(%1)\n"
19830+ " prefetch 256(%1)\n"
19831 "2: \n"
19832 ".section .fixup, \"ax\"\n"
19833- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19834+ "3: \n"
19835+
19836+#ifdef CONFIG_PAX_KERNEXEC
19837+ " movl %%cr0, %0\n"
19838+ " movl %0, %%eax\n"
19839+ " andl $0xFFFEFFFF, %%eax\n"
19840+ " movl %%eax, %%cr0\n"
19841+#endif
19842+
19843+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19844+
19845+#ifdef CONFIG_PAX_KERNEXEC
19846+ " movl %0, %%cr0\n"
19847+#endif
19848+
19849 " jmp 2b\n"
19850 ".previous\n"
19851- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19852+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19853
19854 for (i = 0; i < 4096/64; i++) {
19855 __asm__ __volatile__ (
19856- "1: prefetch 320(%0)\n"
19857- "2: movq (%0), %%mm0\n"
19858- " movq 8(%0), %%mm1\n"
19859- " movq 16(%0), %%mm2\n"
19860- " movq 24(%0), %%mm3\n"
19861- " movq %%mm0, (%1)\n"
19862- " movq %%mm1, 8(%1)\n"
19863- " movq %%mm2, 16(%1)\n"
19864- " movq %%mm3, 24(%1)\n"
19865- " movq 32(%0), %%mm0\n"
19866- " movq 40(%0), %%mm1\n"
19867- " movq 48(%0), %%mm2\n"
19868- " movq 56(%0), %%mm3\n"
19869- " movq %%mm0, 32(%1)\n"
19870- " movq %%mm1, 40(%1)\n"
19871- " movq %%mm2, 48(%1)\n"
19872- " movq %%mm3, 56(%1)\n"
19873+ "1: prefetch 320(%1)\n"
19874+ "2: movq (%1), %%mm0\n"
19875+ " movq 8(%1), %%mm1\n"
19876+ " movq 16(%1), %%mm2\n"
19877+ " movq 24(%1), %%mm3\n"
19878+ " movq %%mm0, (%2)\n"
19879+ " movq %%mm1, 8(%2)\n"
19880+ " movq %%mm2, 16(%2)\n"
19881+ " movq %%mm3, 24(%2)\n"
19882+ " movq 32(%1), %%mm0\n"
19883+ " movq 40(%1), %%mm1\n"
19884+ " movq 48(%1), %%mm2\n"
19885+ " movq 56(%1), %%mm3\n"
19886+ " movq %%mm0, 32(%2)\n"
19887+ " movq %%mm1, 40(%2)\n"
19888+ " movq %%mm2, 48(%2)\n"
19889+ " movq %%mm3, 56(%2)\n"
19890 ".section .fixup, \"ax\"\n"
19891- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19892+ "3:\n"
19893+
19894+#ifdef CONFIG_PAX_KERNEXEC
19895+ " movl %%cr0, %0\n"
19896+ " movl %0, %%eax\n"
19897+ " andl $0xFFFEFFFF, %%eax\n"
19898+ " movl %%eax, %%cr0\n"
19899+#endif
19900+
19901+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19902+
19903+#ifdef CONFIG_PAX_KERNEXEC
19904+ " movl %0, %%cr0\n"
19905+#endif
19906+
19907 " jmp 2b\n"
19908 ".previous\n"
19909 _ASM_EXTABLE(1b, 3b)
19910- : : "r" (from), "r" (to) : "memory");
19911+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19912
19913 from += 64;
19914 to += 64;
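
The fixup paths added to mmx_32.c rewrite the faulting prefetch in kernel text (the movw to 1b turns it into a short jmp), and with CONFIG_PAX_KERNEXEC that text is write-protected, so each asm block now saves %cr0 into the new cr0 output operand, clears the write-protect bit around the patch (the 0xFFFEFFFF mask drops bit 16, CR0.WP), and restores it afterwards; %eax serves as scratch, hence the added "ax" clobber. In C the sequence corresponds roughly to the sketch below, using the stock read_cr0()/write_cr0() helpers and X86_CR0_WP; this illustrates the idea and is not code from the patch:

	#ifdef CONFIG_PAX_KERNEXEC
		unsigned long cr0 = read_cr0();

		write_cr0(cr0 & ~X86_CR0_WP);	/* allow the write to read-only kernel text */
		/* ... patch the faulting prefetch into a short jmp ... */
		write_cr0(cr0);			/* put CR0.WP back */
	#endif
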
19915diff -urNp linux-2.6.32.45/arch/x86/lib/putuser.S linux-2.6.32.45/arch/x86/lib/putuser.S
19916--- linux-2.6.32.45/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19917+++ linux-2.6.32.45/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19918@@ -15,7 +15,8 @@
19919 #include <asm/thread_info.h>
19920 #include <asm/errno.h>
19921 #include <asm/asm.h>
19922-
19923+#include <asm/segment.h>
19924+#include <asm/pgtable.h>
19925
19926 /*
19927 * __put_user_X
19928@@ -29,52 +30,119 @@
19929 * as they get called from within inline assembly.
19930 */
19931
19932-#define ENTER CFI_STARTPROC ; \
19933- GET_THREAD_INFO(%_ASM_BX)
19934+#define ENTER CFI_STARTPROC
19935 #define EXIT ret ; \
19936 CFI_ENDPROC
19937
19938+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19939+#define _DEST %_ASM_CX,%_ASM_BX
19940+#else
19941+#define _DEST %_ASM_CX
19942+#endif
19943+
19944+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19945+#define __copyuser_seg gs;
19946+#else
19947+#define __copyuser_seg
19948+#endif
19949+
19950 .text
19951 ENTRY(__put_user_1)
19952 ENTER
19953+
19954+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19955+ GET_THREAD_INFO(%_ASM_BX)
19956 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19957 jae bad_put_user
19958-1: movb %al,(%_ASM_CX)
19959+
19960+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19961+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19962+ cmp %_ASM_BX,%_ASM_CX
19963+ jb 1234f
19964+ xor %ebx,%ebx
19965+1234:
19966+#endif
19967+
19968+#endif
19969+
19970+1: __copyuser_seg movb %al,(_DEST)
19971 xor %eax,%eax
19972 EXIT
19973 ENDPROC(__put_user_1)
19974
19975 ENTRY(__put_user_2)
19976 ENTER
19977+
19978+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19979+ GET_THREAD_INFO(%_ASM_BX)
19980 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19981 sub $1,%_ASM_BX
19982 cmp %_ASM_BX,%_ASM_CX
19983 jae bad_put_user
19984-2: movw %ax,(%_ASM_CX)
19985+
19986+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19987+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19988+ cmp %_ASM_BX,%_ASM_CX
19989+ jb 1234f
19990+ xor %ebx,%ebx
19991+1234:
19992+#endif
19993+
19994+#endif
19995+
19996+2: __copyuser_seg movw %ax,(_DEST)
19997 xor %eax,%eax
19998 EXIT
19999 ENDPROC(__put_user_2)
20000
20001 ENTRY(__put_user_4)
20002 ENTER
20003+
20004+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20005+ GET_THREAD_INFO(%_ASM_BX)
20006 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20007 sub $3,%_ASM_BX
20008 cmp %_ASM_BX,%_ASM_CX
20009 jae bad_put_user
20010-3: movl %eax,(%_ASM_CX)
20011+
20012+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20013+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20014+ cmp %_ASM_BX,%_ASM_CX
20015+ jb 1234f
20016+ xor %ebx,%ebx
20017+1234:
20018+#endif
20019+
20020+#endif
20021+
20022+3: __copyuser_seg movl %eax,(_DEST)
20023 xor %eax,%eax
20024 EXIT
20025 ENDPROC(__put_user_4)
20026
20027 ENTRY(__put_user_8)
20028 ENTER
20029+
20030+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20031+ GET_THREAD_INFO(%_ASM_BX)
20032 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20033 sub $7,%_ASM_BX
20034 cmp %_ASM_BX,%_ASM_CX
20035 jae bad_put_user
20036-4: mov %_ASM_AX,(%_ASM_CX)
20037+
20038+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20039+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20040+ cmp %_ASM_BX,%_ASM_CX
20041+ jb 1234f
20042+ xor %ebx,%ebx
20043+1234:
20044+#endif
20045+
20046+#endif
20047+
20048+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20049 #ifdef CONFIG_X86_32
20050-5: movl %edx,4(%_ASM_CX)
20051+5: __copyuser_seg movl %edx,4(_DEST)
20052 #endif
20053 xor %eax,%eax
20054 EXIT
20055diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_32.c linux-2.6.32.45/arch/x86/lib/usercopy_32.c
20056--- linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
20057+++ linux-2.6.32.45/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
20058@@ -43,7 +43,7 @@ do { \
20059 __asm__ __volatile__( \
20060 " testl %1,%1\n" \
20061 " jz 2f\n" \
20062- "0: lodsb\n" \
20063+ "0: "__copyuser_seg"lodsb\n" \
20064 " stosb\n" \
20065 " testb %%al,%%al\n" \
20066 " jz 1f\n" \
20067@@ -128,10 +128,12 @@ do { \
20068 int __d0; \
20069 might_fault(); \
20070 __asm__ __volatile__( \
20071+ __COPYUSER_SET_ES \
20072 "0: rep; stosl\n" \
20073 " movl %2,%0\n" \
20074 "1: rep; stosb\n" \
20075 "2:\n" \
20076+ __COPYUSER_RESTORE_ES \
20077 ".section .fixup,\"ax\"\n" \
20078 "3: lea 0(%2,%0,4),%0\n" \
20079 " jmp 2b\n" \
20080@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
20081 might_fault();
20082
20083 __asm__ __volatile__(
20084+ __COPYUSER_SET_ES
20085 " testl %0, %0\n"
20086 " jz 3f\n"
20087 " andl %0,%%ecx\n"
20088@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
20089 " subl %%ecx,%0\n"
20090 " addl %0,%%eax\n"
20091 "1:\n"
20092+ __COPYUSER_RESTORE_ES
20093 ".section .fixup,\"ax\"\n"
20094 "2: xorl %%eax,%%eax\n"
20095 " jmp 1b\n"
20096@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20097
20098 #ifdef CONFIG_X86_INTEL_USERCOPY
20099 static unsigned long
20100-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20101+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20102 {
20103 int d0, d1;
20104 __asm__ __volatile__(
20105@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
20106 " .align 2,0x90\n"
20107 "3: movl 0(%4), %%eax\n"
20108 "4: movl 4(%4), %%edx\n"
20109- "5: movl %%eax, 0(%3)\n"
20110- "6: movl %%edx, 4(%3)\n"
20111+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20112+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20113 "7: movl 8(%4), %%eax\n"
20114 "8: movl 12(%4),%%edx\n"
20115- "9: movl %%eax, 8(%3)\n"
20116- "10: movl %%edx, 12(%3)\n"
20117+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20118+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20119 "11: movl 16(%4), %%eax\n"
20120 "12: movl 20(%4), %%edx\n"
20121- "13: movl %%eax, 16(%3)\n"
20122- "14: movl %%edx, 20(%3)\n"
20123+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20124+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20125 "15: movl 24(%4), %%eax\n"
20126 "16: movl 28(%4), %%edx\n"
20127- "17: movl %%eax, 24(%3)\n"
20128- "18: movl %%edx, 28(%3)\n"
20129+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20130+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20131 "19: movl 32(%4), %%eax\n"
20132 "20: movl 36(%4), %%edx\n"
20133- "21: movl %%eax, 32(%3)\n"
20134- "22: movl %%edx, 36(%3)\n"
20135+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20136+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20137 "23: movl 40(%4), %%eax\n"
20138 "24: movl 44(%4), %%edx\n"
20139- "25: movl %%eax, 40(%3)\n"
20140- "26: movl %%edx, 44(%3)\n"
20141+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20142+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20143 "27: movl 48(%4), %%eax\n"
20144 "28: movl 52(%4), %%edx\n"
20145- "29: movl %%eax, 48(%3)\n"
20146- "30: movl %%edx, 52(%3)\n"
20147+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20148+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20149 "31: movl 56(%4), %%eax\n"
20150 "32: movl 60(%4), %%edx\n"
20151- "33: movl %%eax, 56(%3)\n"
20152- "34: movl %%edx, 60(%3)\n"
20153+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20154+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20155 " addl $-64, %0\n"
20156 " addl $64, %4\n"
20157 " addl $64, %3\n"
20158@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20159 " shrl $2, %0\n"
20160 " andl $3, %%eax\n"
20161 " cld\n"
20162+ __COPYUSER_SET_ES
20163 "99: rep; movsl\n"
20164 "36: movl %%eax, %0\n"
20165 "37: rep; movsb\n"
20166 "100:\n"
20167+ __COPYUSER_RESTORE_ES
20168+ ".section .fixup,\"ax\"\n"
20169+ "101: lea 0(%%eax,%0,4),%0\n"
20170+ " jmp 100b\n"
20171+ ".previous\n"
20172+ ".section __ex_table,\"a\"\n"
20173+ " .align 4\n"
20174+ " .long 1b,100b\n"
20175+ " .long 2b,100b\n"
20176+ " .long 3b,100b\n"
20177+ " .long 4b,100b\n"
20178+ " .long 5b,100b\n"
20179+ " .long 6b,100b\n"
20180+ " .long 7b,100b\n"
20181+ " .long 8b,100b\n"
20182+ " .long 9b,100b\n"
20183+ " .long 10b,100b\n"
20184+ " .long 11b,100b\n"
20185+ " .long 12b,100b\n"
20186+ " .long 13b,100b\n"
20187+ " .long 14b,100b\n"
20188+ " .long 15b,100b\n"
20189+ " .long 16b,100b\n"
20190+ " .long 17b,100b\n"
20191+ " .long 18b,100b\n"
20192+ " .long 19b,100b\n"
20193+ " .long 20b,100b\n"
20194+ " .long 21b,100b\n"
20195+ " .long 22b,100b\n"
20196+ " .long 23b,100b\n"
20197+ " .long 24b,100b\n"
20198+ " .long 25b,100b\n"
20199+ " .long 26b,100b\n"
20200+ " .long 27b,100b\n"
20201+ " .long 28b,100b\n"
20202+ " .long 29b,100b\n"
20203+ " .long 30b,100b\n"
20204+ " .long 31b,100b\n"
20205+ " .long 32b,100b\n"
20206+ " .long 33b,100b\n"
20207+ " .long 34b,100b\n"
20208+ " .long 35b,100b\n"
20209+ " .long 36b,100b\n"
20210+ " .long 37b,100b\n"
20211+ " .long 99b,101b\n"
20212+ ".previous"
20213+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20214+ : "1"(to), "2"(from), "0"(size)
20215+ : "eax", "edx", "memory");
20216+ return size;
20217+}
20218+
20219+static unsigned long
20220+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20221+{
20222+ int d0, d1;
20223+ __asm__ __volatile__(
20224+ " .align 2,0x90\n"
20225+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20226+ " cmpl $67, %0\n"
20227+ " jbe 3f\n"
20228+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20229+ " .align 2,0x90\n"
20230+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20231+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20232+ "5: movl %%eax, 0(%3)\n"
20233+ "6: movl %%edx, 4(%3)\n"
20234+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20235+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20236+ "9: movl %%eax, 8(%3)\n"
20237+ "10: movl %%edx, 12(%3)\n"
20238+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20239+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20240+ "13: movl %%eax, 16(%3)\n"
20241+ "14: movl %%edx, 20(%3)\n"
20242+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20243+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20244+ "17: movl %%eax, 24(%3)\n"
20245+ "18: movl %%edx, 28(%3)\n"
20246+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20247+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20248+ "21: movl %%eax, 32(%3)\n"
20249+ "22: movl %%edx, 36(%3)\n"
20250+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20251+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20252+ "25: movl %%eax, 40(%3)\n"
20253+ "26: movl %%edx, 44(%3)\n"
20254+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20255+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20256+ "29: movl %%eax, 48(%3)\n"
20257+ "30: movl %%edx, 52(%3)\n"
20258+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20259+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20260+ "33: movl %%eax, 56(%3)\n"
20261+ "34: movl %%edx, 60(%3)\n"
20262+ " addl $-64, %0\n"
20263+ " addl $64, %4\n"
20264+ " addl $64, %3\n"
20265+ " cmpl $63, %0\n"
20266+ " ja 1b\n"
20267+ "35: movl %0, %%eax\n"
20268+ " shrl $2, %0\n"
20269+ " andl $3, %%eax\n"
20270+ " cld\n"
20271+ "99: rep; "__copyuser_seg" movsl\n"
20272+ "36: movl %%eax, %0\n"
20273+ "37: rep; "__copyuser_seg" movsb\n"
20274+ "100:\n"
20275 ".section .fixup,\"ax\"\n"
20276 "101: lea 0(%%eax,%0,4),%0\n"
20277 " jmp 100b\n"
20278@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20279 int d0, d1;
20280 __asm__ __volatile__(
20281 " .align 2,0x90\n"
20282- "0: movl 32(%4), %%eax\n"
20283+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20284 " cmpl $67, %0\n"
20285 " jbe 2f\n"
20286- "1: movl 64(%4), %%eax\n"
20287+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20288 " .align 2,0x90\n"
20289- "2: movl 0(%4), %%eax\n"
20290- "21: movl 4(%4), %%edx\n"
20291+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20292+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20293 " movl %%eax, 0(%3)\n"
20294 " movl %%edx, 4(%3)\n"
20295- "3: movl 8(%4), %%eax\n"
20296- "31: movl 12(%4),%%edx\n"
20297+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20298+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20299 " movl %%eax, 8(%3)\n"
20300 " movl %%edx, 12(%3)\n"
20301- "4: movl 16(%4), %%eax\n"
20302- "41: movl 20(%4), %%edx\n"
20303+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20304+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20305 " movl %%eax, 16(%3)\n"
20306 " movl %%edx, 20(%3)\n"
20307- "10: movl 24(%4), %%eax\n"
20308- "51: movl 28(%4), %%edx\n"
20309+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20310+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20311 " movl %%eax, 24(%3)\n"
20312 " movl %%edx, 28(%3)\n"
20313- "11: movl 32(%4), %%eax\n"
20314- "61: movl 36(%4), %%edx\n"
20315+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20316+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20317 " movl %%eax, 32(%3)\n"
20318 " movl %%edx, 36(%3)\n"
20319- "12: movl 40(%4), %%eax\n"
20320- "71: movl 44(%4), %%edx\n"
20321+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20322+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20323 " movl %%eax, 40(%3)\n"
20324 " movl %%edx, 44(%3)\n"
20325- "13: movl 48(%4), %%eax\n"
20326- "81: movl 52(%4), %%edx\n"
20327+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20328+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20329 " movl %%eax, 48(%3)\n"
20330 " movl %%edx, 52(%3)\n"
20331- "14: movl 56(%4), %%eax\n"
20332- "91: movl 60(%4), %%edx\n"
20333+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20334+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20335 " movl %%eax, 56(%3)\n"
20336 " movl %%edx, 60(%3)\n"
20337 " addl $-64, %0\n"
20338@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20339 " shrl $2, %0\n"
20340 " andl $3, %%eax\n"
20341 " cld\n"
20342- "6: rep; movsl\n"
20343+ "6: rep; "__copyuser_seg" movsl\n"
20344 " movl %%eax,%0\n"
20345- "7: rep; movsb\n"
20346+ "7: rep; "__copyuser_seg" movsb\n"
20347 "8:\n"
20348 ".section .fixup,\"ax\"\n"
20349 "9: lea 0(%%eax,%0,4),%0\n"
20350@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20351
20352 __asm__ __volatile__(
20353 " .align 2,0x90\n"
20354- "0: movl 32(%4), %%eax\n"
20355+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20356 " cmpl $67, %0\n"
20357 " jbe 2f\n"
20358- "1: movl 64(%4), %%eax\n"
20359+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20360 " .align 2,0x90\n"
20361- "2: movl 0(%4), %%eax\n"
20362- "21: movl 4(%4), %%edx\n"
20363+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20364+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20365 " movnti %%eax, 0(%3)\n"
20366 " movnti %%edx, 4(%3)\n"
20367- "3: movl 8(%4), %%eax\n"
20368- "31: movl 12(%4),%%edx\n"
20369+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20370+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20371 " movnti %%eax, 8(%3)\n"
20372 " movnti %%edx, 12(%3)\n"
20373- "4: movl 16(%4), %%eax\n"
20374- "41: movl 20(%4), %%edx\n"
20375+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20376+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20377 " movnti %%eax, 16(%3)\n"
20378 " movnti %%edx, 20(%3)\n"
20379- "10: movl 24(%4), %%eax\n"
20380- "51: movl 28(%4), %%edx\n"
20381+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20382+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20383 " movnti %%eax, 24(%3)\n"
20384 " movnti %%edx, 28(%3)\n"
20385- "11: movl 32(%4), %%eax\n"
20386- "61: movl 36(%4), %%edx\n"
20387+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20388+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20389 " movnti %%eax, 32(%3)\n"
20390 " movnti %%edx, 36(%3)\n"
20391- "12: movl 40(%4), %%eax\n"
20392- "71: movl 44(%4), %%edx\n"
20393+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20394+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20395 " movnti %%eax, 40(%3)\n"
20396 " movnti %%edx, 44(%3)\n"
20397- "13: movl 48(%4), %%eax\n"
20398- "81: movl 52(%4), %%edx\n"
20399+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20400+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20401 " movnti %%eax, 48(%3)\n"
20402 " movnti %%edx, 52(%3)\n"
20403- "14: movl 56(%4), %%eax\n"
20404- "91: movl 60(%4), %%edx\n"
20405+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20406+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20407 " movnti %%eax, 56(%3)\n"
20408 " movnti %%edx, 60(%3)\n"
20409 " addl $-64, %0\n"
20410@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20411 " shrl $2, %0\n"
20412 " andl $3, %%eax\n"
20413 " cld\n"
20414- "6: rep; movsl\n"
20415+ "6: rep; "__copyuser_seg" movsl\n"
20416 " movl %%eax,%0\n"
20417- "7: rep; movsb\n"
20418+ "7: rep; "__copyuser_seg" movsb\n"
20419 "8:\n"
20420 ".section .fixup,\"ax\"\n"
20421 "9: lea 0(%%eax,%0,4),%0\n"
20422@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20423
20424 __asm__ __volatile__(
20425 " .align 2,0x90\n"
20426- "0: movl 32(%4), %%eax\n"
20427+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20428 " cmpl $67, %0\n"
20429 " jbe 2f\n"
20430- "1: movl 64(%4), %%eax\n"
20431+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20432 " .align 2,0x90\n"
20433- "2: movl 0(%4), %%eax\n"
20434- "21: movl 4(%4), %%edx\n"
20435+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20436+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20437 " movnti %%eax, 0(%3)\n"
20438 " movnti %%edx, 4(%3)\n"
20439- "3: movl 8(%4), %%eax\n"
20440- "31: movl 12(%4),%%edx\n"
20441+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20442+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20443 " movnti %%eax, 8(%3)\n"
20444 " movnti %%edx, 12(%3)\n"
20445- "4: movl 16(%4), %%eax\n"
20446- "41: movl 20(%4), %%edx\n"
20447+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20448+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20449 " movnti %%eax, 16(%3)\n"
20450 " movnti %%edx, 20(%3)\n"
20451- "10: movl 24(%4), %%eax\n"
20452- "51: movl 28(%4), %%edx\n"
20453+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20454+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20455 " movnti %%eax, 24(%3)\n"
20456 " movnti %%edx, 28(%3)\n"
20457- "11: movl 32(%4), %%eax\n"
20458- "61: movl 36(%4), %%edx\n"
20459+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20460+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20461 " movnti %%eax, 32(%3)\n"
20462 " movnti %%edx, 36(%3)\n"
20463- "12: movl 40(%4), %%eax\n"
20464- "71: movl 44(%4), %%edx\n"
20465+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20466+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20467 " movnti %%eax, 40(%3)\n"
20468 " movnti %%edx, 44(%3)\n"
20469- "13: movl 48(%4), %%eax\n"
20470- "81: movl 52(%4), %%edx\n"
20471+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20472+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20473 " movnti %%eax, 48(%3)\n"
20474 " movnti %%edx, 52(%3)\n"
20475- "14: movl 56(%4), %%eax\n"
20476- "91: movl 60(%4), %%edx\n"
20477+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20478+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20479 " movnti %%eax, 56(%3)\n"
20480 " movnti %%edx, 60(%3)\n"
20481 " addl $-64, %0\n"
20482@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20483 " shrl $2, %0\n"
20484 " andl $3, %%eax\n"
20485 " cld\n"
20486- "6: rep; movsl\n"
20487+ "6: rep; "__copyuser_seg" movsl\n"
20488 " movl %%eax,%0\n"
20489- "7: rep; movsb\n"
20490+ "7: rep; "__copyuser_seg" movsb\n"
20491 "8:\n"
20492 ".section .fixup,\"ax\"\n"
20493 "9: lea 0(%%eax,%0,4),%0\n"
20494@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20495 */
20496 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20497 unsigned long size);
20498-unsigned long __copy_user_intel(void __user *to, const void *from,
20499+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20500+ unsigned long size);
20501+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20502 unsigned long size);
20503 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20504 const void __user *from, unsigned long size);
20505 #endif /* CONFIG_X86_INTEL_USERCOPY */
20506
20507 /* Generic arbitrary sized copy. */
20508-#define __copy_user(to, from, size) \
20509+#define __copy_user(to, from, size, prefix, set, restore) \
20510 do { \
20511 int __d0, __d1, __d2; \
20512 __asm__ __volatile__( \
20513+ set \
20514 " cmp $7,%0\n" \
20515 " jbe 1f\n" \
20516 " movl %1,%0\n" \
20517 " negl %0\n" \
20518 " andl $7,%0\n" \
20519 " subl %0,%3\n" \
20520- "4: rep; movsb\n" \
20521+ "4: rep; "prefix"movsb\n" \
20522 " movl %3,%0\n" \
20523 " shrl $2,%0\n" \
20524 " andl $3,%3\n" \
20525 " .align 2,0x90\n" \
20526- "0: rep; movsl\n" \
20527+ "0: rep; "prefix"movsl\n" \
20528 " movl %3,%0\n" \
20529- "1: rep; movsb\n" \
20530+ "1: rep; "prefix"movsb\n" \
20531 "2:\n" \
20532+ restore \
20533 ".section .fixup,\"ax\"\n" \
20534 "5: addl %3,%0\n" \
20535 " jmp 2b\n" \
20536@@ -682,14 +799,14 @@ do { \
20537 " negl %0\n" \
20538 " andl $7,%0\n" \
20539 " subl %0,%3\n" \
20540- "4: rep; movsb\n" \
20541+ "4: rep; "__copyuser_seg"movsb\n" \
20542 " movl %3,%0\n" \
20543 " shrl $2,%0\n" \
20544 " andl $3,%3\n" \
20545 " .align 2,0x90\n" \
20546- "0: rep; movsl\n" \
20547+ "0: rep; "__copyuser_seg"movsl\n" \
20548 " movl %3,%0\n" \
20549- "1: rep; movsb\n" \
20550+ "1: rep; "__copyuser_seg"movsb\n" \
20551 "2:\n" \
20552 ".section .fixup,\"ax\"\n" \
20553 "5: addl %3,%0\n" \
20554@@ -775,9 +892,9 @@ survive:
20555 }
20556 #endif
20557 if (movsl_is_ok(to, from, n))
20558- __copy_user(to, from, n);
20559+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20560 else
20561- n = __copy_user_intel(to, from, n);
20562+ n = __generic_copy_to_user_intel(to, from, n);
20563 return n;
20564 }
20565 EXPORT_SYMBOL(__copy_to_user_ll);
20566@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20567 unsigned long n)
20568 {
20569 if (movsl_is_ok(to, from, n))
20570- __copy_user(to, from, n);
20571+ __copy_user(to, from, n, __copyuser_seg, "", "");
20572 else
20573- n = __copy_user_intel((void __user *)to,
20574- (const void *)from, n);
20575+ n = __generic_copy_from_user_intel(to, from, n);
20576 return n;
20577 }
20578 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20579@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20580 if (n > 64 && cpu_has_xmm2)
20581 n = __copy_user_intel_nocache(to, from, n);
20582 else
20583- __copy_user(to, from, n);
20584+ __copy_user(to, from, n, __copyuser_seg, "", "");
20585 #else
20586- __copy_user(to, from, n);
20587+ __copy_user(to, from, n, __copyuser_seg, "", "");
20588 #endif
20589 return n;
20590 }
20591 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20592
20593-/**
20594- * copy_to_user: - Copy a block of data into user space.
20595- * @to: Destination address, in user space.
20596- * @from: Source address, in kernel space.
20597- * @n: Number of bytes to copy.
20598- *
20599- * Context: User context only. This function may sleep.
20600- *
20601- * Copy data from kernel space to user space.
20602- *
20603- * Returns number of bytes that could not be copied.
20604- * On success, this will be zero.
20605- */
20606-unsigned long
20607-copy_to_user(void __user *to, const void *from, unsigned long n)
20608+#ifdef CONFIG_PAX_MEMORY_UDEREF
20609+void __set_fs(mm_segment_t x)
20610 {
20611- if (access_ok(VERIFY_WRITE, to, n))
20612- n = __copy_to_user(to, from, n);
20613- return n;
20614+ switch (x.seg) {
20615+ case 0:
20616+ loadsegment(gs, 0);
20617+ break;
20618+ case TASK_SIZE_MAX:
20619+ loadsegment(gs, __USER_DS);
20620+ break;
20621+ case -1UL:
20622+ loadsegment(gs, __KERNEL_DS);
20623+ break;
20624+ default:
20625+ BUG();
20626+ }
20627+ return;
20628 }
20629-EXPORT_SYMBOL(copy_to_user);
20630+EXPORT_SYMBOL(__set_fs);
20631
20632-/**
20633- * copy_from_user: - Copy a block of data from user space.
20634- * @to: Destination address, in kernel space.
20635- * @from: Source address, in user space.
20636- * @n: Number of bytes to copy.
20637- *
20638- * Context: User context only. This function may sleep.
20639- *
20640- * Copy data from user space to kernel space.
20641- *
20642- * Returns number of bytes that could not be copied.
20643- * On success, this will be zero.
20644- *
20645- * If some data could not be copied, this function will pad the copied
20646- * data to the requested size using zero bytes.
20647- */
20648-unsigned long
20649-copy_from_user(void *to, const void __user *from, unsigned long n)
20650+void set_fs(mm_segment_t x)
20651 {
20652- if (access_ok(VERIFY_READ, from, n))
20653- n = __copy_from_user(to, from, n);
20654- else
20655- memset(to, 0, n);
20656- return n;
20657+ current_thread_info()->addr_limit = x;
20658+ __set_fs(x);
20659 }
20660-EXPORT_SYMBOL(copy_from_user);
20661+EXPORT_SYMBOL(set_fs);
20662+#endif
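
With UDEREF on i386 the address limit alone no longer decides what a userspace access can reach, so set_fs() becomes a real function here: besides updating addr_limit it calls __set_fs() to reload the %gs-based user segment (a null selector for a zero limit, __USER_DS for TASK_SIZE_MAX, __KERNEL_DS for KERNEL_DS). Existing callers keep the usual kernel idiom, sketched below for illustration:

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* under UDEREF this also points %gs at the kernel segment */
	/* ... uaccess on a kernel buffer ... */
	set_fs(old_fs);
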
20663diff -urNp linux-2.6.32.45/arch/x86/lib/usercopy_64.c linux-2.6.32.45/arch/x86/lib/usercopy_64.c
20664--- linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20665+++ linux-2.6.32.45/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20666@@ -42,6 +42,12 @@ long
20667 __strncpy_from_user(char *dst, const char __user *src, long count)
20668 {
20669 long res;
20670+
20671+#ifdef CONFIG_PAX_MEMORY_UDEREF
20672+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20673+ src += PAX_USER_SHADOW_BASE;
20674+#endif
20675+
20676 __do_strncpy_from_user(dst, src, count, res);
20677 return res;
20678 }
20679@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20680 {
20681 long __d0;
20682 might_fault();
20683+
20684+#ifdef CONFIG_PAX_MEMORY_UDEREF
20685+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20686+ addr += PAX_USER_SHADOW_BASE;
20687+#endif
20688+
20689 /* no memory constraint because it doesn't change any memory gcc knows
20690 about */
20691 asm volatile(
20692@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20693
20694 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20695 {
20696- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20697+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20698+
20699+#ifdef CONFIG_PAX_MEMORY_UDEREF
20700+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20701+ to += PAX_USER_SHADOW_BASE;
20702+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20703+ from += PAX_USER_SHADOW_BASE;
20704+#endif
20705+
20706 return copy_user_generic((__force void *)to, (__force void *)from, len);
20707- }
20708- return len;
20709+ }
20710+ return len;
20711 }
20712 EXPORT_SYMBOL(copy_in_user);
20713
20714diff -urNp linux-2.6.32.45/arch/x86/Makefile linux-2.6.32.45/arch/x86/Makefile
20715--- linux-2.6.32.45/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20716+++ linux-2.6.32.45/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20717@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20718 else
20719 BITS := 64
20720 UTS_MACHINE := x86_64
20721+ biarch := $(call cc-option,-m64)
20722 CHECKFLAGS += -D__x86_64__ -m64
20723
20724 KBUILD_AFLAGS += -m64
20725@@ -189,3 +190,12 @@ define archhelp
20726 echo ' FDARGS="..." arguments for the booted kernel'
20727 echo ' FDINITRD=file initrd for the booted kernel'
20728 endef
20729+
20730+define OLD_LD
20731+
20732+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20733+*** Please upgrade your binutils to 2.18 or newer
20734+endef
20735+
20736+archprepare:
20737+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20738diff -urNp linux-2.6.32.45/arch/x86/mm/extable.c linux-2.6.32.45/arch/x86/mm/extable.c
20739--- linux-2.6.32.45/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20740+++ linux-2.6.32.45/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20741@@ -1,14 +1,71 @@
20742 #include <linux/module.h>
20743 #include <linux/spinlock.h>
20744+#include <linux/sort.h>
20745 #include <asm/uaccess.h>
20746+#include <asm/pgtable.h>
20747
20748+/*
20749+ * The exception table needs to be sorted so that the binary
20750+ * search that we use to find entries in it works properly.
20751+ * This is used both for the kernel exception table and for
20752+ * the exception tables of modules that get loaded.
20753+ */
20754+static int cmp_ex(const void *a, const void *b)
20755+{
20756+ const struct exception_table_entry *x = a, *y = b;
20757+
20758+ /* avoid overflow */
20759+ if (x->insn > y->insn)
20760+ return 1;
20761+ if (x->insn < y->insn)
20762+ return -1;
20763+ return 0;
20764+}
20765+
20766+static void swap_ex(void *a, void *b, int size)
20767+{
20768+ struct exception_table_entry t, *x = a, *y = b;
20769+
20770+ t = *x;
20771+
20772+ pax_open_kernel();
20773+ *x = *y;
20774+ *y = t;
20775+ pax_close_kernel();
20776+}
20777+
20778+void sort_extable(struct exception_table_entry *start,
20779+ struct exception_table_entry *finish)
20780+{
20781+ sort(start, finish - start, sizeof(struct exception_table_entry),
20782+ cmp_ex, swap_ex);
20783+}
20784+
20785+#ifdef CONFIG_MODULES
20786+/*
20787+ * If the exception table is sorted, any referring to the module init
20788+ * will be at the beginning or the end.
20789+ */
20790+void trim_init_extable(struct module *m)
20791+{
20792+ /*trim the beginning*/
20793+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20794+ m->extable++;
20795+ m->num_exentries--;
20796+ }
20797+ /*trim the end*/
20798+ while (m->num_exentries &&
20799+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20800+ m->num_exentries--;
20801+}
20802+#endif /* CONFIG_MODULES */
20803
20804 int fixup_exception(struct pt_regs *regs)
20805 {
20806 const struct exception_table_entry *fixup;
20807
20808 #ifdef CONFIG_PNPBIOS
20809- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20810+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20811 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20812 extern u32 pnp_bios_is_utter_crap;
20813 pnp_bios_is_utter_crap = 1;
20814diff -urNp linux-2.6.32.45/arch/x86/mm/fault.c linux-2.6.32.45/arch/x86/mm/fault.c
20815--- linux-2.6.32.45/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20816+++ linux-2.6.32.45/arch/x86/mm/fault.c 2011-08-17 20:06:44.000000000 -0400
20817@@ -11,10 +11,19 @@
20818 #include <linux/kprobes.h> /* __kprobes, ... */
20819 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20820 #include <linux/perf_event.h> /* perf_sw_event */
20821+#include <linux/unistd.h>
20822+#include <linux/compiler.h>
20823
20824 #include <asm/traps.h> /* dotraplinkage, ... */
20825 #include <asm/pgalloc.h> /* pgd_*(), ... */
20826 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20827+#include <asm/vsyscall.h>
20828+#include <asm/tlbflush.h>
20829+
20830+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20831+#include <asm/stacktrace.h>
20832+#include "../kernel/dumpstack.h"
20833+#endif
20834
20835 /*
20836 * Page fault error code bits:
20837@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20838 int ret = 0;
20839
20840 /* kprobe_running() needs smp_processor_id() */
20841- if (kprobes_built_in() && !user_mode_vm(regs)) {
20842+ if (kprobes_built_in() && !user_mode(regs)) {
20843 preempt_disable();
20844 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20845 ret = 1;
20846@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20847 return !instr_lo || (instr_lo>>1) == 1;
20848 case 0x00:
20849 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20850- if (probe_kernel_address(instr, opcode))
20851+ if (user_mode(regs)) {
20852+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20853+ return 0;
20854+ } else if (probe_kernel_address(instr, opcode))
20855 return 0;
20856
20857 *prefetch = (instr_lo == 0xF) &&
20858@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20859 while (instr < max_instr) {
20860 unsigned char opcode;
20861
20862- if (probe_kernel_address(instr, opcode))
20863+ if (user_mode(regs)) {
20864+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20865+ break;
20866+ } else if (probe_kernel_address(instr, opcode))
20867 break;
20868
20869 instr++;
20870@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20871 force_sig_info(si_signo, &info, tsk);
20872 }
20873
20874+#ifdef CONFIG_PAX_EMUTRAMP
20875+static int pax_handle_fetch_fault(struct pt_regs *regs);
20876+#endif
20877+
20878+#ifdef CONFIG_PAX_PAGEEXEC
20879+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20880+{
20881+ pgd_t *pgd;
20882+ pud_t *pud;
20883+ pmd_t *pmd;
20884+
20885+ pgd = pgd_offset(mm, address);
20886+ if (!pgd_present(*pgd))
20887+ return NULL;
20888+ pud = pud_offset(pgd, address);
20889+ if (!pud_present(*pud))
20890+ return NULL;
20891+ pmd = pmd_offset(pud, address);
20892+ if (!pmd_present(*pmd))
20893+ return NULL;
20894+ return pmd;
20895+}
20896+#endif
20897+
20898 DEFINE_SPINLOCK(pgd_lock);
20899 LIST_HEAD(pgd_list);
20900
20901@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20902 address += PMD_SIZE) {
20903
20904 unsigned long flags;
20905+
20906+#ifdef CONFIG_PAX_PER_CPU_PGD
20907+ unsigned long cpu;
20908+#else
20909 struct page *page;
20910+#endif
20911
20912 spin_lock_irqsave(&pgd_lock, flags);
20913+
20914+#ifdef CONFIG_PAX_PER_CPU_PGD
20915+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20916+ pgd_t *pgd = get_cpu_pgd(cpu);
20917+#else
20918 list_for_each_entry(page, &pgd_list, lru) {
20919- if (!vmalloc_sync_one(page_address(page), address))
20920+ pgd_t *pgd = page_address(page);
20921+#endif
20922+
20923+ if (!vmalloc_sync_one(pgd, address))
20924 break;
20925 }
20926 spin_unlock_irqrestore(&pgd_lock, flags);
20927@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20928 * an interrupt in the middle of a task switch..
20929 */
20930 pgd_paddr = read_cr3();
20931+
20932+#ifdef CONFIG_PAX_PER_CPU_PGD
20933+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20934+#endif
20935+
20936 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20937 if (!pmd_k)
20938 return -1;
20939@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20940
20941 const pgd_t *pgd_ref = pgd_offset_k(address);
20942 unsigned long flags;
20943+
20944+#ifdef CONFIG_PAX_PER_CPU_PGD
20945+ unsigned long cpu;
20946+#else
20947 struct page *page;
20948+#endif
20949
20950 if (pgd_none(*pgd_ref))
20951 continue;
20952
20953 spin_lock_irqsave(&pgd_lock, flags);
20954+
20955+#ifdef CONFIG_PAX_PER_CPU_PGD
20956+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20957+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20958+#else
20959 list_for_each_entry(page, &pgd_list, lru) {
20960 pgd_t *pgd;
20961 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20962+#endif
20963+
20964 if (pgd_none(*pgd))
20965 set_pgd(pgd, *pgd_ref);
20966 else
20967@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20968 * happen within a race in page table update. In the later
20969 * case just flush:
20970 */
20971+
20972+#ifdef CONFIG_PAX_PER_CPU_PGD
20973+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20974+ pgd = pgd_offset_cpu(smp_processor_id(), address);
20975+#else
20976 pgd = pgd_offset(current->active_mm, address);
20977+#endif
20978+
20979 pgd_ref = pgd_offset_k(address);
20980 if (pgd_none(*pgd_ref))
20981 return -1;
20982@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20983 static int is_errata100(struct pt_regs *regs, unsigned long address)
20984 {
20985 #ifdef CONFIG_X86_64
20986- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20987+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20988 return 1;
20989 #endif
20990 return 0;
20991@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20992 }
20993
20994 static const char nx_warning[] = KERN_CRIT
20995-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20996+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20997
20998 static void
20999 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21000@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
21001 if (!oops_may_print())
21002 return;
21003
21004- if (error_code & PF_INSTR) {
21005+ if (nx_enabled && (error_code & PF_INSTR)) {
21006 unsigned int level;
21007
21008 pte_t *pte = lookup_address(address, &level);
21009
21010 if (pte && pte_present(*pte) && !pte_exec(*pte))
21011- printk(nx_warning, current_uid());
21012+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21013 }
21014
21015+#ifdef CONFIG_PAX_KERNEXEC
21016+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21017+ if (current->signal->curr_ip)
21018+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21019+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21020+ else
21021+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21022+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21023+ }
21024+#endif
21025+
21026 printk(KERN_ALERT "BUG: unable to handle kernel ");
21027 if (address < PAGE_SIZE)
21028 printk(KERN_CONT "NULL pointer dereference");
21029@@ -704,6 +791,70 @@ __bad_area_nosemaphore(struct pt_regs *r
21030 unsigned long address, int si_code)
21031 {
21032 struct task_struct *tsk = current;
21033+#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21034+ struct mm_struct *mm = tsk->mm;
21035+#endif
21036+
21037+#ifdef CONFIG_X86_64
21038+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
21039+ if (regs->ip == (unsigned long)vgettimeofday) {
21040+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
21041+ return;
21042+ } else if (regs->ip == (unsigned long)vtime) {
21043+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
21044+ return;
21045+ } else if (regs->ip == (unsigned long)vgetcpu) {
21046+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
21047+ return;
21048+ }
21049+ }
21050+#endif
21051+
21052+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21053+ if (mm && (error_code & PF_USER)) {
21054+ unsigned long ip = regs->ip;
21055+
21056+ if (v8086_mode(regs))
21057+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21058+
21059+ /*
21060+ * It's possible to have interrupts off here:
21061+ */
21062+ local_irq_enable();
21063+
21064+#ifdef CONFIG_PAX_PAGEEXEC
21065+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
21066+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
21067+
21068+#ifdef CONFIG_PAX_EMUTRAMP
21069+ switch (pax_handle_fetch_fault(regs)) {
21070+ case 2:
21071+ return;
21072+ }
21073+#endif
21074+
21075+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21076+ do_group_exit(SIGKILL);
21077+ }
21078+#endif
21079+
21080+#ifdef CONFIG_PAX_SEGMEXEC
21081+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
21082+
21083+#ifdef CONFIG_PAX_EMUTRAMP
21084+ switch (pax_handle_fetch_fault(regs)) {
21085+ case 2:
21086+ return;
21087+ }
21088+#endif
21089+
21090+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
21091+ do_group_exit(SIGKILL);
21092+ }
21093+#endif
21094+
21095+ }
21096+#endif
21097
21098 /* User mode accesses just cause a SIGSEGV */
21099 if (error_code & PF_USER) {
21100@@ -857,6 +1008,99 @@ static int spurious_fault_check(unsigned
21101 return 1;
21102 }
21103
21104+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21105+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21106+{
21107+ pte_t *pte;
21108+ pmd_t *pmd;
21109+ spinlock_t *ptl;
21110+ unsigned char pte_mask;
21111+
21112+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21113+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21114+ return 0;
21115+
21116+ /* PaX: it's our fault, let's handle it if we can */
21117+
21118+ /* PaX: take a look at read faults before acquiring any locks */
21119+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21120+ /* instruction fetch attempt from a protected page in user mode */
21121+ up_read(&mm->mmap_sem);
21122+
21123+#ifdef CONFIG_PAX_EMUTRAMP
21124+ switch (pax_handle_fetch_fault(regs)) {
21125+ case 2:
21126+ return 1;
21127+ }
21128+#endif
21129+
21130+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21131+ do_group_exit(SIGKILL);
21132+ }
21133+
21134+ pmd = pax_get_pmd(mm, address);
21135+ if (unlikely(!pmd))
21136+ return 0;
21137+
21138+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21139+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21140+ pte_unmap_unlock(pte, ptl);
21141+ return 0;
21142+ }
21143+
21144+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21145+ /* write attempt to a protected page in user mode */
21146+ pte_unmap_unlock(pte, ptl);
21147+ return 0;
21148+ }
21149+
21150+#ifdef CONFIG_SMP
21151+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21152+#else
21153+ if (likely(address > get_limit(regs->cs)))
21154+#endif
21155+ {
21156+ set_pte(pte, pte_mkread(*pte));
21157+ __flush_tlb_one(address);
21158+ pte_unmap_unlock(pte, ptl);
21159+ up_read(&mm->mmap_sem);
21160+ return 1;
21161+ }
21162+
21163+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21164+
21165+ /*
21166+ * PaX: fill DTLB with user rights and retry
21167+ */
21168+ __asm__ __volatile__ (
21169+ "orb %2,(%1)\n"
21170+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21171+/*
21172+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
21173+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21174+ * page fault when examined during a TLB load attempt. this is true not only
21175+ * for PTEs holding a non-present entry but also present entries that will
21176+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21177+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21178+ * for our target pages since their PTEs are simply not in the TLBs at all.
21179+
21180+ * the best thing in omitting it is that we gain around 15-20% speed in the
21181+ * fast path of the page fault handler and can get rid of tracing since we
21182+ * can no longer flush unintended entries.
21183+ */
21184+ "invlpg (%0)\n"
21185+#endif
21186+ __copyuser_seg"testb $0,(%0)\n"
21187+ "xorb %3,(%1)\n"
21188+ :
21189+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21190+ : "memory", "cc");
21191+ pte_unmap_unlock(pte, ptl);
21192+ up_read(&mm->mmap_sem);
21193+ return 1;
21194+}
21195+#endif
21196+
21197 /*
21198 * Handle a spurious fault caused by a stale TLB entry.
21199 *
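The pte_mask computed above, _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)), relies on PF_WRITE being bit 1 of the page-fault error code and _PAGE_DIRTY being bit 6 of a PTE: the shift by _PAGE_BIT_DIRTY-1 turns a write fault into a pre-set dirty bit next to ACCESSED and USER before the inline asm briefly exposes the PTE to the TLB fill. A minimal userspace check of that bit arithmetic (the constants are copied by value from the x86 definitions; the test harness itself is ours):

#include <assert.h>
#include <stdio.h>

/* x86 page-fault error-code and PTE bits, copied by value for the check */
#define PF_WRITE        (1 << 1)    /* fault was a write */
#define _PAGE_BIT_DIRTY 6
#define _PAGE_USER      (1 << 2)
#define _PAGE_ACCESSED  (1 << 5)
#define _PAGE_DIRTY     (1 << _PAGE_BIT_DIRTY)

int main(void)
{
    unsigned long write_fault = PF_WRITE, read_fault = 0;

    /* same expression as the patch: a write fault also pre-sets DIRTY */
    unsigned long mask_w = _PAGE_ACCESSED | _PAGE_USER |
                           ((write_fault & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));
    unsigned long mask_r = _PAGE_ACCESSED | _PAGE_USER |
                           ((read_fault & PF_WRITE) << (_PAGE_BIT_DIRTY - 1));

    assert(mask_w == (_PAGE_ACCESSED | _PAGE_USER | _PAGE_DIRTY));
    assert(mask_r == (_PAGE_ACCESSED | _PAGE_USER));
    printf("write mask %#lx, read mask %#lx\n", mask_w, mask_r);
    return 0;
}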
21200@@ -923,6 +1167,9 @@ int show_unhandled_signals = 1;
21201 static inline int
21202 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21203 {
21204+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21205+ return 1;
21206+
21207 if (write) {
21208 /* write, present and write, not present: */
21209 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21210@@ -956,17 +1203,31 @@ do_page_fault(struct pt_regs *regs, unsi
21211 {
21212 struct vm_area_struct *vma;
21213 struct task_struct *tsk;
21214- unsigned long address;
21215 struct mm_struct *mm;
21216 int write;
21217 int fault;
21218
21219+ /* Get the faulting address: */
21220+ unsigned long address = read_cr2();
21221+
21222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21223+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21224+ if (!search_exception_tables(regs->ip)) {
21225+ bad_area_nosemaphore(regs, error_code, address);
21226+ return;
21227+ }
21228+ if (address < PAX_USER_SHADOW_BASE) {
21229+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21230+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21231+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21232+ } else
21233+ address -= PAX_USER_SHADOW_BASE;
21234+ }
21235+#endif
21236+
21237 tsk = current;
21238 mm = tsk->mm;
21239
21240- /* Get the faulting address: */
21241- address = read_cr2();
21242-
21243 /*
21244 * Detect and handle instructions that would cause a page fault for
21245 * both a tracked kernel page and a userspace page.
21246@@ -1026,7 +1287,7 @@ do_page_fault(struct pt_regs *regs, unsi
21247 * User-mode registers count as a user access even for any
21248 * potential system fault or CPU buglet:
21249 */
21250- if (user_mode_vm(regs)) {
21251+ if (user_mode(regs)) {
21252 local_irq_enable();
21253 error_code |= PF_USER;
21254 } else {
21255@@ -1080,6 +1341,11 @@ do_page_fault(struct pt_regs *regs, unsi
21256 might_sleep();
21257 }
21258
21259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21260+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21261+ return;
21262+#endif
21263+
21264 vma = find_vma(mm, address);
21265 if (unlikely(!vma)) {
21266 bad_area(regs, error_code, address);
21267@@ -1091,18 +1357,24 @@ do_page_fault(struct pt_regs *regs, unsi
21268 bad_area(regs, error_code, address);
21269 return;
21270 }
21271- if (error_code & PF_USER) {
21272- /*
21273- * Accessing the stack below %sp is always a bug.
21274- * The large cushion allows instructions like enter
21275- * and pusha to work. ("enter $65535, $31" pushes
21276- * 32 pointers and then decrements %sp by 65535.)
21277- */
21278- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21279- bad_area(regs, error_code, address);
21280- return;
21281- }
21282+ /*
21283+ * Accessing the stack below %sp is always a bug.
21284+ * The large cushion allows instructions like enter
21285+ * and pusha to work. ("enter $65535, $31" pushes
21286+ * 32 pointers and then decrements %sp by 65535.)
21287+ */
21288+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21289+ bad_area(regs, error_code, address);
21290+ return;
21291 }
21292+
21293+#ifdef CONFIG_PAX_SEGMEXEC
21294+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21295+ bad_area(regs, error_code, address);
21296+ return;
21297+ }
21298+#endif
21299+
21300 if (unlikely(expand_stack(vma, address))) {
21301 bad_area(regs, error_code, address);
21302 return;
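The "below %sp" sanity check is hoisted out of the PF_USER-only branch and now compares the faulting address against task_pt_regs(tsk)->sp, the user stack pointer saved at kernel entry, so the same test also applies when a kernel-side access on the task's behalf trips it. The cushion is 65536 + 32 * sizeof(unsigned long) bytes, sized for the worst case of "enter $65535, $31". A two-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
    /* "enter $65535, $31" can push 32 frame pointers and then drop
     * %sp by 65535, so accesses this far below the saved user %sp are
     * still treated as legitimate */
    unsigned long cushion_ilp32 = 65536UL + 32 * 4;  /* 65664 bytes */
    unsigned long cushion_lp64  = 65536UL + 32 * 8;  /* 65792 bytes */

    printf("stack cushion: %lu (32-bit) / %lu (64-bit) bytes\n",
           cushion_ilp32, cushion_lp64);
    return 0;
}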
21303@@ -1146,3 +1418,199 @@ good_area:
21304
21305 up_read(&mm->mmap_sem);
21306 }
21307+
21308+#ifdef CONFIG_PAX_EMUTRAMP
21309+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21310+{
21311+ int err;
21312+
21313+ do { /* PaX: gcc trampoline emulation #1 */
21314+ unsigned char mov1, mov2;
21315+ unsigned short jmp;
21316+ unsigned int addr1, addr2;
21317+
21318+#ifdef CONFIG_X86_64
21319+ if ((regs->ip + 11) >> 32)
21320+ break;
21321+#endif
21322+
21323+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21324+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21325+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21326+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21327+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21328+
21329+ if (err)
21330+ break;
21331+
21332+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21333+ regs->cx = addr1;
21334+ regs->ax = addr2;
21335+ regs->ip = addr2;
21336+ return 2;
21337+ }
21338+ } while (0);
21339+
21340+ do { /* PaX: gcc trampoline emulation #2 */
21341+ unsigned char mov, jmp;
21342+ unsigned int addr1, addr2;
21343+
21344+#ifdef CONFIG_X86_64
21345+ if ((regs->ip + 9) >> 32)
21346+ break;
21347+#endif
21348+
21349+ err = get_user(mov, (unsigned char __user *)regs->ip);
21350+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21351+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21352+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21353+
21354+ if (err)
21355+ break;
21356+
21357+ if (mov == 0xB9 && jmp == 0xE9) {
21358+ regs->cx = addr1;
21359+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21360+ return 2;
21361+ }
21362+ } while (0);
21363+
21364+ return 1; /* PaX in action */
21365+}
21366+
21367+#ifdef CONFIG_X86_64
21368+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21369+{
21370+ int err;
21371+
21372+ do { /* PaX: gcc trampoline emulation #1 */
21373+ unsigned short mov1, mov2, jmp1;
21374+ unsigned char jmp2;
21375+ unsigned int addr1;
21376+ unsigned long addr2;
21377+
21378+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21379+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21380+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21381+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21382+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21383+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21384+
21385+ if (err)
21386+ break;
21387+
21388+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21389+ regs->r11 = addr1;
21390+ regs->r10 = addr2;
21391+ regs->ip = addr1;
21392+ return 2;
21393+ }
21394+ } while (0);
21395+
21396+ do { /* PaX: gcc trampoline emulation #2 */
21397+ unsigned short mov1, mov2, jmp1;
21398+ unsigned char jmp2;
21399+ unsigned long addr1, addr2;
21400+
21401+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21402+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21403+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21404+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21405+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21406+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21407+
21408+ if (err)
21409+ break;
21410+
21411+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21412+ regs->r11 = addr1;
21413+ regs->r10 = addr2;
21414+ regs->ip = addr1;
21415+ return 2;
21416+ }
21417+ } while (0);
21418+
21419+ return 1; /* PaX in action */
21420+}
21421+#endif
21422+
21423+/*
21424+ * PaX: decide what to do with offenders (regs->ip = fault address)
21425+ *
21426+ * returns 1 when task should be killed
21427+ * 2 when gcc trampoline was detected
21428+ */
21429+static int pax_handle_fetch_fault(struct pt_regs *regs)
21430+{
21431+ if (v8086_mode(regs))
21432+ return 1;
21433+
21434+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21435+ return 1;
21436+
21437+#ifdef CONFIG_X86_32
21438+ return pax_handle_fetch_fault_32(regs);
21439+#else
21440+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21441+ return pax_handle_fetch_fault_32(regs);
21442+ else
21443+ return pax_handle_fetch_fault_64(regs);
21444+#endif
21445+}
21446+#endif
21447+
21448+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21449+void pax_report_insns(void *pc, void *sp)
21450+{
21451+ long i;
21452+
21453+ printk(KERN_ERR "PAX: bytes at PC: ");
21454+ for (i = 0; i < 20; i++) {
21455+ unsigned char c;
21456+ if (get_user(c, (__force unsigned char __user *)pc+i))
21457+ printk(KERN_CONT "?? ");
21458+ else
21459+ printk(KERN_CONT "%02x ", c);
21460+ }
21461+ printk("\n");
21462+
21463+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21464+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21465+ unsigned long c;
21466+ if (get_user(c, (__force unsigned long __user *)sp+i))
21467+#ifdef CONFIG_X86_32
21468+ printk(KERN_CONT "???????? ");
21469+#else
21470+ printk(KERN_CONT "???????????????? ");
21471+#endif
21472+ else
21473+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21474+ }
21475+ printk("\n");
21476+}
21477+#endif
21478+
21479+/**
21480+ * probe_kernel_write(): safely attempt to write to a location
21481+ * @dst: address to write to
21482+ * @src: pointer to the data that shall be written
21483+ * @size: size of the data chunk
21484+ *
21485+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21486+ * happens, handle that and return -EFAULT.
21487+ */
21488+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21489+{
21490+ long ret;
21491+ mm_segment_t old_fs = get_fs();
21492+
21493+ set_fs(KERNEL_DS);
21494+ pagefault_disable();
21495+ pax_open_kernel();
21496+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21497+ pax_close_kernel();
21498+ pagefault_enable();
21499+ set_fs(old_fs);
21500+
21501+ return ret ? -EFAULT : 0;
21502+}
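The CONFIG_PAX_EMUTRAMP handlers added above recognise the fixed byte sequences gcc emits for nested-function trampolines (for the first 32-bit pattern: mov $addr1,%ecx; mov $addr2,%eax; jmp *%eax) and emulate them in the fault handler instead of letting the instruction fetch from a non-executable stack kill the task. A standalone sketch of that first pattern match, using a plain byte buffer and a stand-in register struct instead of pt_regs (the names here are ours, the opcode constants mirror the patch):

#include <stdio.h>
#include <string.h>

struct fake_regs { unsigned int cx, ax, ip; };

/* Return 1 if buf[] holds the 12-byte trampoline and fill regs the way
 * the fault handler would, 0 otherwise.  Little-endian reads, as on x86. */
static int emulate_tramp_32(const unsigned char *buf, struct fake_regs *regs)
{
    unsigned int addr1, addr2;
    unsigned short jmp;

    if (buf[0] != 0xB9 || buf[5] != 0xB8)   /* mov imm32,%ecx / mov imm32,%eax */
        return 0;
    memcpy(&addr1, buf + 1, 4);
    memcpy(&addr2, buf + 6, 4);
    memcpy(&jmp,   buf + 10, 2);
    if (jmp != 0xE0FF)                      /* ff e0 = jmp *%eax */
        return 0;
    regs->cx = addr1;                       /* static chain */
    regs->ax = addr2;                       /* target function */
    regs->ip = addr2;                       /* resume at the target */
    return 1;
}

int main(void)
{
    unsigned char tramp[12] = { 0xB9, 0x78, 0x56, 0x34, 0x12,
                                0xB8, 0xF0, 0xDE, 0xBC, 0x9A,
                                0xFF, 0xE0 };
    struct fake_regs r = { 0 };

    if (emulate_tramp_32(tramp, &r))
        printf("trampoline: ecx=%#x, jump to %#x\n", r.cx, r.ip);
    return 0;
}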
21503diff -urNp linux-2.6.32.45/arch/x86/mm/gup.c linux-2.6.32.45/arch/x86/mm/gup.c
21504--- linux-2.6.32.45/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21505+++ linux-2.6.32.45/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21506@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21507 addr = start;
21508 len = (unsigned long) nr_pages << PAGE_SHIFT;
21509 end = start + len;
21510- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21511+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21512 (void __user *)start, len)))
21513 return 0;
21514
21515diff -urNp linux-2.6.32.45/arch/x86/mm/highmem_32.c linux-2.6.32.45/arch/x86/mm/highmem_32.c
21516--- linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21517+++ linux-2.6.32.45/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21518@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21519 idx = type + KM_TYPE_NR*smp_processor_id();
21520 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21521 BUG_ON(!pte_none(*(kmap_pte-idx)));
21522+
21523+ pax_open_kernel();
21524 set_pte(kmap_pte-idx, mk_pte(page, prot));
21525+ pax_close_kernel();
21526
21527 return (void *)vaddr;
21528 }
21529diff -urNp linux-2.6.32.45/arch/x86/mm/hugetlbpage.c linux-2.6.32.45/arch/x86/mm/hugetlbpage.c
21530--- linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21531+++ linux-2.6.32.45/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21532@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21533 struct hstate *h = hstate_file(file);
21534 struct mm_struct *mm = current->mm;
21535 struct vm_area_struct *vma;
21536- unsigned long start_addr;
21537+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21538+
21539+#ifdef CONFIG_PAX_SEGMEXEC
21540+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21541+ pax_task_size = SEGMEXEC_TASK_SIZE;
21542+#endif
21543+
21544+ pax_task_size -= PAGE_SIZE;
21545
21546 if (len > mm->cached_hole_size) {
21547- start_addr = mm->free_area_cache;
21548+ start_addr = mm->free_area_cache;
21549 } else {
21550- start_addr = TASK_UNMAPPED_BASE;
21551- mm->cached_hole_size = 0;
21552+ start_addr = mm->mmap_base;
21553+ mm->cached_hole_size = 0;
21554 }
21555
21556 full_search:
21557@@ -281,26 +288,27 @@ full_search:
21558
21559 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21560 /* At this point: (!vma || addr < vma->vm_end). */
21561- if (TASK_SIZE - len < addr) {
21562+ if (pax_task_size - len < addr) {
21563 /*
21564 * Start a new search - just in case we missed
21565 * some holes.
21566 */
21567- if (start_addr != TASK_UNMAPPED_BASE) {
21568- start_addr = TASK_UNMAPPED_BASE;
21569+ if (start_addr != mm->mmap_base) {
21570+ start_addr = mm->mmap_base;
21571 mm->cached_hole_size = 0;
21572 goto full_search;
21573 }
21574 return -ENOMEM;
21575 }
21576- if (!vma || addr + len <= vma->vm_start) {
21577- mm->free_area_cache = addr + len;
21578- return addr;
21579- }
21580+ if (check_heap_stack_gap(vma, addr, len))
21581+ break;
21582 if (addr + mm->cached_hole_size < vma->vm_start)
21583 mm->cached_hole_size = vma->vm_start - addr;
21584 addr = ALIGN(vma->vm_end, huge_page_size(h));
21585 }
21586+
21587+ mm->free_area_cache = addr + len;
21588+ return addr;
21589 }
21590
21591 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21592@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21593 {
21594 struct hstate *h = hstate_file(file);
21595 struct mm_struct *mm = current->mm;
21596- struct vm_area_struct *vma, *prev_vma;
21597- unsigned long base = mm->mmap_base, addr = addr0;
21598+ struct vm_area_struct *vma;
21599+ unsigned long base = mm->mmap_base, addr;
21600 unsigned long largest_hole = mm->cached_hole_size;
21601- int first_time = 1;
21602
21603 /* don't allow allocations above current base */
21604 if (mm->free_area_cache > base)
21605@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21606 largest_hole = 0;
21607 mm->free_area_cache = base;
21608 }
21609-try_again:
21610+
21611 /* make sure it can fit in the remaining address space */
21612 if (mm->free_area_cache < len)
21613 goto fail;
21614
21615 /* either no address requested or cant fit in requested address hole */
21616- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21617+ addr = (mm->free_area_cache - len);
21618 do {
21619+ addr &= huge_page_mask(h);
21620+ vma = find_vma(mm, addr);
21621 /*
21622 * Lookup failure means no vma is above this address,
21623 * i.e. return with success:
21624- */
21625- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21626- return addr;
21627-
21628- /*
21629 * new region fits between prev_vma->vm_end and
21630 * vma->vm_start, use it:
21631 */
21632- if (addr + len <= vma->vm_start &&
21633- (!prev_vma || (addr >= prev_vma->vm_end))) {
21634+ if (check_heap_stack_gap(vma, addr, len)) {
21635 /* remember the address as a hint for next time */
21636- mm->cached_hole_size = largest_hole;
21637- return (mm->free_area_cache = addr);
21638- } else {
21639- /* pull free_area_cache down to the first hole */
21640- if (mm->free_area_cache == vma->vm_end) {
21641- mm->free_area_cache = vma->vm_start;
21642- mm->cached_hole_size = largest_hole;
21643- }
21644+ mm->cached_hole_size = largest_hole;
21645+ return (mm->free_area_cache = addr);
21646+ }
21647+ /* pull free_area_cache down to the first hole */
21648+ if (mm->free_area_cache == vma->vm_end) {
21649+ mm->free_area_cache = vma->vm_start;
21650+ mm->cached_hole_size = largest_hole;
21651 }
21652
21653 /* remember the largest hole we saw so far */
21654 if (addr + largest_hole < vma->vm_start)
21655- largest_hole = vma->vm_start - addr;
21656+ largest_hole = vma->vm_start - addr;
21657
21658 /* try just below the current vma->vm_start */
21659- addr = (vma->vm_start - len) & huge_page_mask(h);
21660- } while (len <= vma->vm_start);
21661+ addr = skip_heap_stack_gap(vma, len);
21662+ } while (!IS_ERR_VALUE(addr));
21663
21664 fail:
21665 /*
21666- * if hint left us with no space for the requested
21667- * mapping then try again:
21668- */
21669- if (first_time) {
21670- mm->free_area_cache = base;
21671- largest_hole = 0;
21672- first_time = 0;
21673- goto try_again;
21674- }
21675- /*
21676 * A failed mmap() very likely causes application failure,
21677 * so fall back to the bottom-up function here. This scenario
21678 * can happen with large stack limits and large mmap()
21679 * allocations.
21680 */
21681- mm->free_area_cache = TASK_UNMAPPED_BASE;
21682+
21683+#ifdef CONFIG_PAX_SEGMEXEC
21684+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21685+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21686+ else
21687+#endif
21688+
21689+ mm->mmap_base = TASK_UNMAPPED_BASE;
21690+
21691+#ifdef CONFIG_PAX_RANDMMAP
21692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21693+ mm->mmap_base += mm->delta_mmap;
21694+#endif
21695+
21696+ mm->free_area_cache = mm->mmap_base;
21697 mm->cached_hole_size = ~0UL;
21698 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21699 len, pgoff, flags);
21700@@ -387,6 +393,7 @@ fail:
21701 /*
21702 * Restore the topdown base:
21703 */
21704+ mm->mmap_base = base;
21705 mm->free_area_cache = base;
21706 mm->cached_hole_size = ~0UL;
21707
21708@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21709 struct hstate *h = hstate_file(file);
21710 struct mm_struct *mm = current->mm;
21711 struct vm_area_struct *vma;
21712+ unsigned long pax_task_size = TASK_SIZE;
21713
21714 if (len & ~huge_page_mask(h))
21715 return -EINVAL;
21716- if (len > TASK_SIZE)
21717+
21718+#ifdef CONFIG_PAX_SEGMEXEC
21719+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21720+ pax_task_size = SEGMEXEC_TASK_SIZE;
21721+#endif
21722+
21723+ pax_task_size -= PAGE_SIZE;
21724+
21725+ if (len > pax_task_size)
21726 return -ENOMEM;
21727
21728 if (flags & MAP_FIXED) {
21729@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21730 if (addr) {
21731 addr = ALIGN(addr, huge_page_size(h));
21732 vma = find_vma(mm, addr);
21733- if (TASK_SIZE - len >= addr &&
21734- (!vma || addr + len <= vma->vm_start))
21735+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21736 return addr;
21737 }
21738 if (mm->get_unmapped_area == arch_get_unmapped_area)
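Each "!vma || addr + len <= vma->vm_start" test in these hugetlb allocators is replaced by check_heap_stack_gap(), which this patch introduces elsewhere; beyond the old overlap test it is meant to keep a configurable guard gap between a new mapping and a downward-growing stack VMA above it. A simplified userspace sketch of that idea; the struct layout, flag value and gap size are illustrative stand-ins, not the patch's exact definitions:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x0100UL                       /* stand-in flag value */
static unsigned long heap_stack_gap = 64UL << 10;   /* illustrative 64KB   */

struct vma_stub {
    unsigned long vm_start;
    unsigned long vm_flags;
};

/* roughly the idea behind check_heap_stack_gap(): the old overlap test,
 * plus a guard gap when the next VMA is a downward-growing stack */
static bool gap_ok(const struct vma_stub *next, unsigned long addr, unsigned long len)
{
    if (!next)
        return true;                                /* nothing above us */
    if (addr + len > next->vm_start)
        return false;                               /* would overlap    */
    if (next->vm_flags & VM_GROWSDOWN)
        return next->vm_start - (addr + len) >= heap_stack_gap;
    return true;
}

int main(void)
{
    struct vma_stub stack = { .vm_start = 0xbf000000UL, .vm_flags = VM_GROWSDOWN };

    printf("%d %d\n",
           gap_ok(&stack, 0xbefff000UL, 0x1000),    /* flush against stack: 0 */
           gap_ok(&stack, 0xbe000000UL, 0x1000));   /* well below it:       1 */
    return 0;
}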
21739diff -urNp linux-2.6.32.45/arch/x86/mm/init_32.c linux-2.6.32.45/arch/x86/mm/init_32.c
21740--- linux-2.6.32.45/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21741+++ linux-2.6.32.45/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21742@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21743 }
21744
21745 /*
21746- * Creates a middle page table and puts a pointer to it in the
21747- * given global directory entry. This only returns the gd entry
21748- * in non-PAE compilation mode, since the middle layer is folded.
21749- */
21750-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21751-{
21752- pud_t *pud;
21753- pmd_t *pmd_table;
21754-
21755-#ifdef CONFIG_X86_PAE
21756- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21757- if (after_bootmem)
21758- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21759- else
21760- pmd_table = (pmd_t *)alloc_low_page();
21761- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21762- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21763- pud = pud_offset(pgd, 0);
21764- BUG_ON(pmd_table != pmd_offset(pud, 0));
21765-
21766- return pmd_table;
21767- }
21768-#endif
21769- pud = pud_offset(pgd, 0);
21770- pmd_table = pmd_offset(pud, 0);
21771-
21772- return pmd_table;
21773-}
21774-
21775-/*
21776 * Create a page table and place a pointer to it in a middle page
21777 * directory entry:
21778 */
21779@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21780 page_table = (pte_t *)alloc_low_page();
21781
21782 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21783+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21784+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21785+#else
21786 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21787+#endif
21788 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21789 }
21790
21791 return pte_offset_kernel(pmd, 0);
21792 }
21793
21794+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21795+{
21796+ pud_t *pud;
21797+ pmd_t *pmd_table;
21798+
21799+ pud = pud_offset(pgd, 0);
21800+ pmd_table = pmd_offset(pud, 0);
21801+
21802+ return pmd_table;
21803+}
21804+
21805 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21806 {
21807 int pgd_idx = pgd_index(vaddr);
21808@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21809 int pgd_idx, pmd_idx;
21810 unsigned long vaddr;
21811 pgd_t *pgd;
21812+ pud_t *pud;
21813 pmd_t *pmd;
21814 pte_t *pte = NULL;
21815
21816@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21817 pgd = pgd_base + pgd_idx;
21818
21819 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21820- pmd = one_md_table_init(pgd);
21821- pmd = pmd + pmd_index(vaddr);
21822+ pud = pud_offset(pgd, vaddr);
21823+ pmd = pmd_offset(pud, vaddr);
21824+
21825+#ifdef CONFIG_X86_PAE
21826+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21827+#endif
21828+
21829 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21830 pmd++, pmd_idx++) {
21831 pte = page_table_kmap_check(one_page_table_init(pmd),
21832@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21833 }
21834 }
21835
21836-static inline int is_kernel_text(unsigned long addr)
21837+static inline int is_kernel_text(unsigned long start, unsigned long end)
21838 {
21839- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21840- return 1;
21841- return 0;
21842+ if ((start > ktla_ktva((unsigned long)_etext) ||
21843+ end <= ktla_ktva((unsigned long)_stext)) &&
21844+ (start > ktla_ktva((unsigned long)_einittext) ||
21845+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21846+
21847+#ifdef CONFIG_ACPI_SLEEP
21848+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21849+#endif
21850+
21851+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21852+ return 0;
21853+ return 1;
21854 }
21855
21856 /*
21857@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21858 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21859 unsigned long start_pfn, end_pfn;
21860 pgd_t *pgd_base = swapper_pg_dir;
21861- int pgd_idx, pmd_idx, pte_ofs;
21862+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21863 unsigned long pfn;
21864 pgd_t *pgd;
21865+ pud_t *pud;
21866 pmd_t *pmd;
21867 pte_t *pte;
21868 unsigned pages_2m, pages_4k;
21869@@ -278,8 +279,13 @@ repeat:
21870 pfn = start_pfn;
21871 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21872 pgd = pgd_base + pgd_idx;
21873- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21874- pmd = one_md_table_init(pgd);
21875+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21876+ pud = pud_offset(pgd, 0);
21877+ pmd = pmd_offset(pud, 0);
21878+
21879+#ifdef CONFIG_X86_PAE
21880+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21881+#endif
21882
21883 if (pfn >= end_pfn)
21884 continue;
21885@@ -291,14 +297,13 @@ repeat:
21886 #endif
21887 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21888 pmd++, pmd_idx++) {
21889- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21890+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21891
21892 /*
21893 * Map with big pages if possible, otherwise
21894 * create normal page tables:
21895 */
21896 if (use_pse) {
21897- unsigned int addr2;
21898 pgprot_t prot = PAGE_KERNEL_LARGE;
21899 /*
21900 * first pass will use the same initial
21901@@ -308,11 +313,7 @@ repeat:
21902 __pgprot(PTE_IDENT_ATTR |
21903 _PAGE_PSE);
21904
21905- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21906- PAGE_OFFSET + PAGE_SIZE-1;
21907-
21908- if (is_kernel_text(addr) ||
21909- is_kernel_text(addr2))
21910+ if (is_kernel_text(address, address + PMD_SIZE))
21911 prot = PAGE_KERNEL_LARGE_EXEC;
21912
21913 pages_2m++;
21914@@ -329,7 +330,7 @@ repeat:
21915 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21916 pte += pte_ofs;
21917 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21918- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21919+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21920 pgprot_t prot = PAGE_KERNEL;
21921 /*
21922 * first pass will use the same initial
21923@@ -337,7 +338,7 @@ repeat:
21924 */
21925 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21926
21927- if (is_kernel_text(addr))
21928+ if (is_kernel_text(address, address + PAGE_SIZE))
21929 prot = PAGE_KERNEL_EXEC;
21930
21931 pages_4k++;
21932@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21933
21934 pud = pud_offset(pgd, va);
21935 pmd = pmd_offset(pud, va);
21936- if (!pmd_present(*pmd))
21937+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
21938 break;
21939
21940 pte = pte_offset_kernel(pmd, va);
21941@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21942
21943 static void __init pagetable_init(void)
21944 {
21945- pgd_t *pgd_base = swapper_pg_dir;
21946-
21947- permanent_kmaps_init(pgd_base);
21948+ permanent_kmaps_init(swapper_pg_dir);
21949 }
21950
21951 #ifdef CONFIG_ACPI_SLEEP
21952@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21953 * ACPI suspend needs this for resume, because things like the intel-agp
21954 * driver might have split up a kernel 4MB mapping.
21955 */
21956-char swsusp_pg_dir[PAGE_SIZE]
21957+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21958 __attribute__ ((aligned(PAGE_SIZE)));
21959
21960 static inline void save_pg_dir(void)
21961 {
21962- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21963+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21964 }
21965 #else /* !CONFIG_ACPI_SLEEP */
21966 static inline void save_pg_dir(void)
21967@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21968 flush_tlb_all();
21969 }
21970
21971-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21972+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21973 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21974
21975 /* user-defined highmem size */
21976@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21977 * Initialize the boot-time allocator (with low memory only):
21978 */
21979 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21980- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21981+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21982 PAGE_SIZE);
21983 if (bootmap == -1L)
21984 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21985@@ -864,6 +863,12 @@ void __init mem_init(void)
21986
21987 pci_iommu_alloc();
21988
21989+#ifdef CONFIG_PAX_PER_CPU_PGD
21990+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21991+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21992+ KERNEL_PGD_PTRS);
21993+#endif
21994+
21995 #ifdef CONFIG_FLATMEM
21996 BUG_ON(!mem_map);
21997 #endif
21998@@ -881,7 +886,7 @@ void __init mem_init(void)
21999 set_highmem_pages_init();
22000
22001 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22002- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22003+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22004 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22005
22006 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22007@@ -923,10 +928,10 @@ void __init mem_init(void)
22008 ((unsigned long)&__init_end -
22009 (unsigned long)&__init_begin) >> 10,
22010
22011- (unsigned long)&_etext, (unsigned long)&_edata,
22012- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22013+ (unsigned long)&_sdata, (unsigned long)&_edata,
22014+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22015
22016- (unsigned long)&_text, (unsigned long)&_etext,
22017+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22018 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22019
22020 /*
22021@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
22022 if (!kernel_set_to_readonly)
22023 return;
22024
22025+ start = ktla_ktva(start);
22026 pr_debug("Set kernel text: %lx - %lx for read write\n",
22027 start, start+size);
22028
22029@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
22030 if (!kernel_set_to_readonly)
22031 return;
22032
22033+ start = ktla_ktva(start);
22034 pr_debug("Set kernel text: %lx - %lx for read only\n",
22035 start, start+size);
22036
22037@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
22038 unsigned long start = PFN_ALIGN(_text);
22039 unsigned long size = PFN_ALIGN(_etext) - start;
22040
22041+ start = ktla_ktva(start);
22042 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22043 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22044 size >> 10);
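The reworked is_kernel_text() takes a [start, end) range instead of a single address and reports an overlap with any of the executable regions (kernel text, init text, the ACPI wakeup area, the low BIOS/video hole) by chaining "start > region_end || end <= region_start" clauses; note the conservative '>' on the upper bound. A small standalone check of that overlap predicate, with region addresses invented for the demonstration:

#include <assert.h>
#include <stdbool.h>

/* true if [start,end) intersects the region: the negation of the
 * "start > region_end || end <= region_start" clauses used above */
static bool overlaps(unsigned long start, unsigned long end,
                     unsigned long rs, unsigned long re)
{
    return !(start > re || end <= rs);
}

int main(void)
{
    unsigned long text_s = 0xc1000000UL, text_e = 0xc1400000UL;

    assert( overlaps(0xc13ff000UL, 0xc1400000UL, text_s, text_e)); /* last page   */
    assert( overlaps(0xc0fff000UL, 0xc1001000UL, text_s, text_e)); /* straddles   */
    assert(!overlaps(0xc0ffe000UL, 0xc0fff000UL, text_s, text_e)); /* just below  */
    assert(!overlaps(0xc1401000UL, 0xc1402000UL, text_s, text_e)); /* a page past */
    return 0;
}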
22045diff -urNp linux-2.6.32.45/arch/x86/mm/init_64.c linux-2.6.32.45/arch/x86/mm/init_64.c
22046--- linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
22047+++ linux-2.6.32.45/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
22048@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
22049 pmd = fill_pmd(pud, vaddr);
22050 pte = fill_pte(pmd, vaddr);
22051
22052+ pax_open_kernel();
22053 set_pte(pte, new_pte);
22054+ pax_close_kernel();
22055
22056 /*
22057 * It's enough to flush this one mapping.
22058@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
22059 pgd = pgd_offset_k((unsigned long)__va(phys));
22060 if (pgd_none(*pgd)) {
22061 pud = (pud_t *) spp_getpage();
22062- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22063- _PAGE_USER));
22064+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22065 }
22066 pud = pud_offset(pgd, (unsigned long)__va(phys));
22067 if (pud_none(*pud)) {
22068 pmd = (pmd_t *) spp_getpage();
22069- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22070- _PAGE_USER));
22071+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22072 }
22073 pmd = pmd_offset(pud, phys);
22074 BUG_ON(!pmd_none(*pmd));
22075@@ -675,6 +675,12 @@ void __init mem_init(void)
22076
22077 pci_iommu_alloc();
22078
22079+#ifdef CONFIG_PAX_PER_CPU_PGD
22080+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22081+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22082+ KERNEL_PGD_PTRS);
22083+#endif
22084+
22085 /* clear_bss() already clear the empty_zero_page */
22086
22087 reservedpages = 0;
22088@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
22089 static struct vm_area_struct gate_vma = {
22090 .vm_start = VSYSCALL_START,
22091 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22092- .vm_page_prot = PAGE_READONLY_EXEC,
22093- .vm_flags = VM_READ | VM_EXEC
22094+ .vm_page_prot = PAGE_READONLY,
22095+ .vm_flags = VM_READ
22096 };
22097
22098 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
22099@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
22100
22101 const char *arch_vma_name(struct vm_area_struct *vma)
22102 {
22103- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22104+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22105 return "[vdso]";
22106 if (vma == &gate_vma)
22107 return "[vsyscall]";
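With CONFIG_PAX_PER_CPU_PGD the boot CPU gets its own top-level page table, so both mem_init() hunks copy the kernel half of swapper_pg_dir into it with clone_pgd_range(), which in the stock kernel is simply a typed memcpy of PGD slots. A sketch of that copy with stand-in types; PTRS_PER_PGD and the boundary index below are illustrative values, not taken from the patch:

#include <stdio.h>
#include <string.h>

typedef struct { unsigned long pgd; } pgd_t;

#define PTRS_PER_PGD        512
#define KERNEL_PGD_BOUNDARY 272     /* first kernel-space slot (example) */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

/* same shape as the stock kernel helper: a typed memcpy of PGD entries */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
    memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
    static pgd_t swapper[PTRS_PER_PGD], percpu[PTRS_PER_PGD];

    for (int i = 0; i < PTRS_PER_PGD; i++)
        swapper[i].pgd = i;                         /* fake entries */

    /* what the added mem_init() code does for the per-CPU pgd */
    clone_pgd_range(percpu + KERNEL_PGD_BOUNDARY,
                    swapper + KERNEL_PGD_BOUNDARY,
                    KERNEL_PGD_PTRS);

    printf("user slot 0 untouched: %lu, first kernel slot copied: %lu\n",
           percpu[0].pgd, percpu[KERNEL_PGD_BOUNDARY].pgd);
    return 0;
}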
22108diff -urNp linux-2.6.32.45/arch/x86/mm/init.c linux-2.6.32.45/arch/x86/mm/init.c
22109--- linux-2.6.32.45/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
22110+++ linux-2.6.32.45/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
22111@@ -69,11 +69,7 @@ static void __init find_early_table_spac
22112 * cause a hotspot and fill up ZONE_DMA. The page tables
22113 * need roughly 0.5KB per GB.
22114 */
22115-#ifdef CONFIG_X86_32
22116- start = 0x7000;
22117-#else
22118- start = 0x8000;
22119-#endif
22120+ start = 0x100000;
22121 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
22122 tables, PAGE_SIZE);
22123 if (e820_table_start == -1UL)
22124@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
22125 #endif
22126
22127 set_nx();
22128- if (nx_enabled)
22129+ if (nx_enabled && cpu_has_nx)
22130 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22131
22132 /* Enable PSE if available */
22133@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22134 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22135 * mmio resources as well as potential bios/acpi data regions.
22136 */
22137+
22138 int devmem_is_allowed(unsigned long pagenr)
22139 {
22140+#ifdef CONFIG_GRKERNSEC_KMEM
22141+ /* allow BDA */
22142+ if (!pagenr)
22143+ return 1;
22144+ /* allow EBDA */
22145+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22146+ return 1;
22147+ /* allow ISA/video mem */
22148+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22149+ return 1;
22150+ /* throw out everything else below 1MB */
22151+ if (pagenr <= 256)
22152+ return 0;
22153+#else
22154 if (pagenr <= 256)
22155 return 1;
22156+#endif
22157+
22158 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22159 return 0;
22160 if (!page_is_ram(pagenr))
22161@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22162
22163 void free_initmem(void)
22164 {
22165+
22166+#ifdef CONFIG_PAX_KERNEXEC
22167+#ifdef CONFIG_X86_32
22168+ /* PaX: limit KERNEL_CS to actual size */
22169+ unsigned long addr, limit;
22170+ struct desc_struct d;
22171+ int cpu;
22172+
22173+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22174+ limit = (limit - 1UL) >> PAGE_SHIFT;
22175+
22176+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22177+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22178+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22179+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22180+ }
22181+
22182+ /* PaX: make KERNEL_CS read-only */
22183+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22184+ if (!paravirt_enabled())
22185+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22186+/*
22187+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22188+ pgd = pgd_offset_k(addr);
22189+ pud = pud_offset(pgd, addr);
22190+ pmd = pmd_offset(pud, addr);
22191+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22192+ }
22193+*/
22194+#ifdef CONFIG_X86_PAE
22195+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22196+/*
22197+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22198+ pgd = pgd_offset_k(addr);
22199+ pud = pud_offset(pgd, addr);
22200+ pmd = pmd_offset(pud, addr);
22201+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22202+ }
22203+*/
22204+#endif
22205+
22206+#ifdef CONFIG_MODULES
22207+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22208+#endif
22209+
22210+#else
22211+ pgd_t *pgd;
22212+ pud_t *pud;
22213+ pmd_t *pmd;
22214+ unsigned long addr, end;
22215+
22216+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22217+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22218+ pgd = pgd_offset_k(addr);
22219+ pud = pud_offset(pgd, addr);
22220+ pmd = pmd_offset(pud, addr);
22221+ if (!pmd_present(*pmd))
22222+ continue;
22223+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22224+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22225+ else
22226+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22227+ }
22228+
22229+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22230+ end = addr + KERNEL_IMAGE_SIZE;
22231+ for (; addr < end; addr += PMD_SIZE) {
22232+ pgd = pgd_offset_k(addr);
22233+ pud = pud_offset(pgd, addr);
22234+ pmd = pmd_offset(pud, addr);
22235+ if (!pmd_present(*pmd))
22236+ continue;
22237+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22238+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22239+ }
22240+#endif
22241+
22242+ flush_tlb_all();
22243+#endif
22244+
22245 free_init_pages("unused kernel memory",
22246 (unsigned long)(&__init_begin),
22247 (unsigned long)(&__init_end));
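Under CONFIG_GRKERNSEC_KMEM the low-memory policy of devmem_is_allowed() is inverted: instead of whitelisting the whole first megabyte (pagenr <= 256), only the BIOS data area (page 0), the EBDA page at 0x9f000 and the ISA/video hole stay reachable through /dev/mem, and every other page below 1MB is refused. A userspace sketch of just that sub-1MB decision, using the usual x86 values ISA_START_ADDRESS = 0xa0000 and ISA_END_ADDRESS = 0x100000:

#include <stdio.h>

#define PAGE_SHIFT        12
#define ISA_START_ADDRESS 0xa0000UL
#define ISA_END_ADDRESS   0x100000UL

/* mirrors the CONFIG_GRKERNSEC_KMEM branch for pages below 1MB */
static int low_page_allowed(unsigned long pagenr)
{
    if (!pagenr)
        return 1;                                   /* BDA             */
    if (pagenr == (0x9f000UL >> PAGE_SHIFT))
        return 1;                                   /* EBDA            */
    if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
        pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT))
        return 1;                                   /* ISA / video mem */
    return 0;                                       /* rest of low 1MB */
}

int main(void)
{
    printf("pfn 0x00: %d, pfn 0x10: %d, pfn 0x9f: %d, pfn 0xa0: %d\n",
           low_page_allowed(0x00), low_page_allowed(0x10),
           low_page_allowed(0x9f), low_page_allowed(0xa0));
    return 0;   /* prints 1, 0, 1, 1 */
}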
22248diff -urNp linux-2.6.32.45/arch/x86/mm/iomap_32.c linux-2.6.32.45/arch/x86/mm/iomap_32.c
22249--- linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22250+++ linux-2.6.32.45/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22251@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22252 debug_kmap_atomic(type);
22253 idx = type + KM_TYPE_NR * smp_processor_id();
22254 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22255+
22256+ pax_open_kernel();
22257 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22258+ pax_close_kernel();
22259+
22260 arch_flush_lazy_mmu_mode();
22261
22262 return (void *)vaddr;
22263diff -urNp linux-2.6.32.45/arch/x86/mm/ioremap.c linux-2.6.32.45/arch/x86/mm/ioremap.c
22264--- linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22265+++ linux-2.6.32.45/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22266@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22267 * Second special case: Some BIOSen report the PC BIOS
22268 * area (640->1Mb) as ram even though it is not.
22269 */
22270- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22271- pagenr < (BIOS_END >> PAGE_SHIFT))
22272+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22273+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22274 return 0;
22275
22276 for (i = 0; i < e820.nr_map; i++) {
22277@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22278 /*
22279 * Don't allow anybody to remap normal RAM that we're using..
22280 */
22281- for (pfn = phys_addr >> PAGE_SHIFT;
22282- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22283- pfn++) {
22284-
22285+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22286 int is_ram = page_is_ram(pfn);
22287
22288- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22289+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22290 return NULL;
22291 WARN_ON_ONCE(is_ram);
22292 }
22293@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22294 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22295
22296 static __initdata int after_paging_init;
22297-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22298+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22299
22300 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22301 {
22302@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22303 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22304
22305 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22306- memset(bm_pte, 0, sizeof(bm_pte));
22307- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22308+ pmd_populate_user(&init_mm, pmd, bm_pte);
22309
22310 /*
22311 * The boot-ioremap range spans multiple pmds, for which
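The loop-bound rewrite in __ioremap_caller() casts pfn to resource_size_t before shifting: with a 32-bit unsigned long and more than 4GB of physical address space (PAE), pfn << PAGE_SHIFT wraps for any pfn at or above 1<<20, so the "don't remap normal RAM" scan ends up comparing a truncated address. A tiny demonstration of the wrap (types only, the surrounding logic is omitted):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint32_t pfn = 0x100000;                         /* first pfn above 4GB */

    uint32_t wrapped = pfn << PAGE_SHIFT;            /* old 32-bit math     */
    uint64_t correct = (uint64_t)pfn << PAGE_SHIFT;  /* resource_size_t     */

    printf("32-bit: %#" PRIx32 "  64-bit: %#" PRIx64 "\n", wrapped, correct);
    return 0;   /* the 32-bit result wraps to 0, the 64-bit one keeps 0x100000000 */
}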
22312diff -urNp linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c
22313--- linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22314+++ linux-2.6.32.45/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22315@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22316 * memory (e.g. tracked pages)? For now, we need this to avoid
22317 * invoking kmemcheck for PnP BIOS calls.
22318 */
22319- if (regs->flags & X86_VM_MASK)
22320+ if (v8086_mode(regs))
22321 return false;
22322- if (regs->cs != __KERNEL_CS)
22323+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22324 return false;
22325
22326 pte = kmemcheck_pte_lookup(address);
22327diff -urNp linux-2.6.32.45/arch/x86/mm/mmap.c linux-2.6.32.45/arch/x86/mm/mmap.c
22328--- linux-2.6.32.45/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22329+++ linux-2.6.32.45/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22330@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22331 * Leave an at least ~128 MB hole with possible stack randomization.
22332 */
22333 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22334-#define MAX_GAP (TASK_SIZE/6*5)
22335+#define MAX_GAP (pax_task_size/6*5)
22336
22337 /*
22338 * True on X86_32 or when emulating IA32 on X86_64
22339@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22340 return rnd << PAGE_SHIFT;
22341 }
22342
22343-static unsigned long mmap_base(void)
22344+static unsigned long mmap_base(struct mm_struct *mm)
22345 {
22346 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22347+ unsigned long pax_task_size = TASK_SIZE;
22348+
22349+#ifdef CONFIG_PAX_SEGMEXEC
22350+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22351+ pax_task_size = SEGMEXEC_TASK_SIZE;
22352+#endif
22353
22354 if (gap < MIN_GAP)
22355 gap = MIN_GAP;
22356 else if (gap > MAX_GAP)
22357 gap = MAX_GAP;
22358
22359- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22360+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22361 }
22362
22363 /*
22364 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22365 * does, but not when emulating X86_32
22366 */
22367-static unsigned long mmap_legacy_base(void)
22368+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22369 {
22370- if (mmap_is_ia32())
22371+ if (mmap_is_ia32()) {
22372+
22373+#ifdef CONFIG_PAX_SEGMEXEC
22374+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22375+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22376+ else
22377+#endif
22378+
22379 return TASK_UNMAPPED_BASE;
22380- else
22381+ } else
22382 return TASK_UNMAPPED_BASE + mmap_rnd();
22383 }
22384
22385@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22386 void arch_pick_mmap_layout(struct mm_struct *mm)
22387 {
22388 if (mmap_is_legacy()) {
22389- mm->mmap_base = mmap_legacy_base();
22390+ mm->mmap_base = mmap_legacy_base(mm);
22391+
22392+#ifdef CONFIG_PAX_RANDMMAP
22393+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22394+ mm->mmap_base += mm->delta_mmap;
22395+#endif
22396+
22397 mm->get_unmapped_area = arch_get_unmapped_area;
22398 mm->unmap_area = arch_unmap_area;
22399 } else {
22400- mm->mmap_base = mmap_base();
22401+ mm->mmap_base = mmap_base(mm);
22402+
22403+#ifdef CONFIG_PAX_RANDMMAP
22404+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22405+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22406+#endif
22407+
22408 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22409 mm->unmap_area = arch_unmap_area_topdown;
22410 }
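mmap_base() clamps the stack rlimit between MIN_GAP (about 128MB plus the stack randomization reserve) and MAX_GAP (5/6 of the task size) and subtracts it, together with the mmap randomization, from the top of the possibly SEGMEXEC-reduced address space. A standalone sketch of that clamping arithmetic; task size, rlimit and randomization below are invented example inputs, and the plain 128MB constant stands in for MIN_GAP without the randomization reserve:

#include <stdio.h>

static unsigned long page_align(unsigned long x)
{
    return (x + 0xfffUL) & ~0xfffUL;
}

/* same shape as mmap_base(): clamp the stack gap, subtract from the top */
static unsigned long mmap_base_sketch(unsigned long task_size,
                                      unsigned long stack_rlimit,
                                      unsigned long rnd)
{
    unsigned long min_gap = 128UL << 20;        /* ~128MB           */
    unsigned long max_gap = task_size / 6 * 5;  /* MAX_GAP          */
    unsigned long gap = stack_rlimit;

    if (gap < min_gap)
        gap = min_gap;
    else if (gap > max_gap)
        gap = max_gap;

    return page_align(task_size - gap - rnd);
}

int main(void)
{
    /* e.g. 3GB task size, 8MB stack rlimit, 1MB of randomization */
    printf("base = %#lx\n",
           mmap_base_sketch(0xc0000000UL, 8UL << 20, 1UL << 20));
    return 0;
}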
22411diff -urNp linux-2.6.32.45/arch/x86/mm/mmio-mod.c linux-2.6.32.45/arch/x86/mm/mmio-mod.c
22412--- linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22413+++ linux-2.6.32.45/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22414@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22415 break;
22416 default:
22417 {
22418- unsigned char *ip = (unsigned char *)instptr;
22419+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22420 my_trace->opcode = MMIO_UNKNOWN_OP;
22421 my_trace->width = 0;
22422 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22423@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22424 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22425 void __iomem *addr)
22426 {
22427- static atomic_t next_id;
22428+ static atomic_unchecked_t next_id;
22429 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22430 /* These are page-unaligned. */
22431 struct mmiotrace_map map = {
22432@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22433 .private = trace
22434 },
22435 .phys = offset,
22436- .id = atomic_inc_return(&next_id)
22437+ .id = atomic_inc_return_unchecked(&next_id)
22438 };
22439 map.map_id = trace->id;
22440
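next_id moves from atomic_t to atomic_unchecked_t because, under the PAX_REFCOUNT feature added elsewhere in this patch, plain atomic_t arithmetic traps on overflow to catch reference-count wraps; a pure ID counter like this one may legitimately wrap, so it opts out via the *_unchecked variants. A userspace illustration of the checked/unchecked distinction using a compiler overflow builtin rather than the patch's asm (the function names here are ours):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked" increment: abort on signed overflow, conceptually what a
 * PAX_REFCOUNT-protected atomic_inc() does for reference counts */
static int inc_checked(int v)
{
    int out;
    if (__builtin_add_overflow(v, 1, &out)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    return out;
}

/* "unchecked" increment: wrapping is fine, as for an ID counter */
static unsigned int inc_unchecked(unsigned int v)
{
    return v + 1;
}

int main(void)
{
    printf("unchecked wrap: %u\n", inc_unchecked(UINT_MAX)); /* 0, fine  */
    printf("checked: %d\n", inc_checked(INT_MAX));           /* aborts   */
    return 0;
}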
22441diff -urNp linux-2.6.32.45/arch/x86/mm/numa_32.c linux-2.6.32.45/arch/x86/mm/numa_32.c
22442--- linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22443+++ linux-2.6.32.45/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22444@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22445 }
22446 #endif
22447
22448-extern unsigned long find_max_low_pfn(void);
22449 extern unsigned long highend_pfn, highstart_pfn;
22450
22451 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22452diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr.c linux-2.6.32.45/arch/x86/mm/pageattr.c
22453--- linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22454+++ linux-2.6.32.45/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22455@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22456 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22457 */
22458 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22459- pgprot_val(forbidden) |= _PAGE_NX;
22460+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22461
22462 /*
22463 * The kernel text needs to be executable for obvious reasons
22464 * Does not cover __inittext since that is gone later on. On
22465 * 64bit we do not enforce !NX on the low mapping
22466 */
22467- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22468- pgprot_val(forbidden) |= _PAGE_NX;
22469+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22470+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22471
22472+#ifdef CONFIG_DEBUG_RODATA
22473 /*
22474 * The .rodata section needs to be read-only. Using the pfn
22475 * catches all aliases.
22476@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22477 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22478 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22479 pgprot_val(forbidden) |= _PAGE_RW;
22480+#endif
22481+
22482+#ifdef CONFIG_PAX_KERNEXEC
22483+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22484+ pgprot_val(forbidden) |= _PAGE_RW;
22485+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22486+ }
22487+#endif
22488
22489 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22490
22491@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22492 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22493 {
22494 /* change init_mm */
22495+ pax_open_kernel();
22496 set_pte_atomic(kpte, pte);
22497+
22498 #ifdef CONFIG_X86_32
22499 if (!SHARED_KERNEL_PMD) {
22500+
22501+#ifdef CONFIG_PAX_PER_CPU_PGD
22502+ unsigned long cpu;
22503+#else
22504 struct page *page;
22505+#endif
22506
22507+#ifdef CONFIG_PAX_PER_CPU_PGD
22508+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22509+ pgd_t *pgd = get_cpu_pgd(cpu);
22510+#else
22511 list_for_each_entry(page, &pgd_list, lru) {
22512- pgd_t *pgd;
22513+ pgd_t *pgd = (pgd_t *)page_address(page);
22514+#endif
22515+
22516 pud_t *pud;
22517 pmd_t *pmd;
22518
22519- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22520+ pgd += pgd_index(address);
22521 pud = pud_offset(pgd, address);
22522 pmd = pmd_offset(pud, address);
22523 set_pte_atomic((pte_t *)pmd, pte);
22524 }
22525 }
22526 #endif
22527+ pax_close_kernel();
22528 }
22529
22530 static int
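static_protections() accumulates a "forbidden" mask and strips it from the requested protections; the CONFIG_PAX_KERNEXEC addition forbids both _PAGE_RW and _PAGE_NX for any pfn inside the kernel image (text through the start of data), and every _PAGE_NX use is now masked with __supported_pte_mask so the bit is only applied when NX exists. A small sketch of that mask-and-strip pattern together with the within() helper pageattr.c already uses; the page-frame ranges below are invented:

#include <inttypes.h>
#include <stdio.h>

#define _PAGE_RW 0x002ULL
#define _PAGE_NX (1ULL << 63)

/* same helper pageattr.c uses: start <= x < end */
static int within(unsigned long x, unsigned long start, unsigned long end)
{
    return start <= x && x < end;
}

/* collect forbidden bits, then clear them from the requested protection */
static uint64_t prot_sketch(uint64_t prot, unsigned long pfn,
                            unsigned long text_pfn, unsigned long data_pfn,
                            uint64_t supported_mask)
{
    uint64_t forbidden = 0;

    if (within(pfn, text_pfn, data_pfn)) {          /* kernel image page    */
        forbidden |= _PAGE_RW;                      /* must not be writable */
        forbidden |= _PAGE_NX & supported_mask;     /* must stay executable */
    }
    return prot & ~forbidden;
}

int main(void)
{
    uint64_t rw_nx = _PAGE_RW | _PAGE_NX;

    printf("inside image:  %#" PRIx64 "\n",
           prot_sketch(rw_nx, 0x1005, 0x1000, 0x1400, ~0ULL)); /* both stripped */
    printf("outside image: %#" PRIx64 "\n",
           prot_sketch(rw_nx, 0x2000, 0x1000, 0x1400, ~0ULL)); /* untouched     */
    return 0;
}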
22531diff -urNp linux-2.6.32.45/arch/x86/mm/pageattr-test.c linux-2.6.32.45/arch/x86/mm/pageattr-test.c
22532--- linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22533+++ linux-2.6.32.45/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22534@@ -36,7 +36,7 @@ enum {
22535
22536 static int pte_testbit(pte_t pte)
22537 {
22538- return pte_flags(pte) & _PAGE_UNUSED1;
22539+ return pte_flags(pte) & _PAGE_CPA_TEST;
22540 }
22541
22542 struct split_state {
22543diff -urNp linux-2.6.32.45/arch/x86/mm/pat.c linux-2.6.32.45/arch/x86/mm/pat.c
22544--- linux-2.6.32.45/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22545+++ linux-2.6.32.45/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22546@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22547
22548 conflict:
22549 printk(KERN_INFO "%s:%d conflicting memory types "
22550- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22551+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22552 new->end, cattr_name(new->type), cattr_name(entry->type));
22553 return -EBUSY;
22554 }
22555@@ -559,7 +559,7 @@ unlock_ret:
22556
22557 if (err) {
22558 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22559- current->comm, current->pid, start, end);
22560+ current->comm, task_pid_nr(current), start, end);
22561 }
22562
22563 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22564@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22565 while (cursor < to) {
22566 if (!devmem_is_allowed(pfn)) {
22567 printk(KERN_INFO
22568- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22569- current->comm, from, to);
22570+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22571+ current->comm, from, to, cursor);
22572 return 0;
22573 }
22574 cursor += PAGE_SIZE;
22575@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22576 printk(KERN_INFO
22577 "%s:%d ioremap_change_attr failed %s "
22578 "for %Lx-%Lx\n",
22579- current->comm, current->pid,
22580+ current->comm, task_pid_nr(current),
22581 cattr_name(flags),
22582 base, (unsigned long long)(base + size));
22583 return -EINVAL;
22584@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22585 free_memtype(paddr, paddr + size);
22586 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22587 " for %Lx-%Lx, got %s\n",
22588- current->comm, current->pid,
22589+ current->comm, task_pid_nr(current),
22590 cattr_name(want_flags),
22591 (unsigned long long)paddr,
22592 (unsigned long long)(paddr + size),
22593diff -urNp linux-2.6.32.45/arch/x86/mm/pf_in.c linux-2.6.32.45/arch/x86/mm/pf_in.c
22594--- linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22595+++ linux-2.6.32.45/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22596@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22597 int i;
22598 enum reason_type rv = OTHERS;
22599
22600- p = (unsigned char *)ins_addr;
22601+ p = (unsigned char *)ktla_ktva(ins_addr);
22602 p += skip_prefix(p, &prf);
22603 p += get_opcode(p, &opcode);
22604
22605@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22606 struct prefix_bits prf;
22607 int i;
22608
22609- p = (unsigned char *)ins_addr;
22610+ p = (unsigned char *)ktla_ktva(ins_addr);
22611 p += skip_prefix(p, &prf);
22612 p += get_opcode(p, &opcode);
22613
22614@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22615 struct prefix_bits prf;
22616 int i;
22617
22618- p = (unsigned char *)ins_addr;
22619+ p = (unsigned char *)ktla_ktva(ins_addr);
22620 p += skip_prefix(p, &prf);
22621 p += get_opcode(p, &opcode);
22622
22623@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22624 int i;
22625 unsigned long rv;
22626
22627- p = (unsigned char *)ins_addr;
22628+ p = (unsigned char *)ktla_ktva(ins_addr);
22629 p += skip_prefix(p, &prf);
22630 p += get_opcode(p, &opcode);
22631 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22632@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22633 int i;
22634 unsigned long rv;
22635
22636- p = (unsigned char *)ins_addr;
22637+ p = (unsigned char *)ktla_ktva(ins_addr);
22638 p += skip_prefix(p, &prf);
22639 p += get_opcode(p, &opcode);
22640 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22641diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable_32.c linux-2.6.32.45/arch/x86/mm/pgtable_32.c
22642--- linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22643+++ linux-2.6.32.45/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22644@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22645 return;
22646 }
22647 pte = pte_offset_kernel(pmd, vaddr);
22648+
22649+ pax_open_kernel();
22650 if (pte_val(pteval))
22651 set_pte_at(&init_mm, vaddr, pte, pteval);
22652 else
22653 pte_clear(&init_mm, vaddr, pte);
22654+ pax_close_kernel();
22655
22656 /*
22657 * It's enough to flush this one mapping.
22658diff -urNp linux-2.6.32.45/arch/x86/mm/pgtable.c linux-2.6.32.45/arch/x86/mm/pgtable.c
22659--- linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22660+++ linux-2.6.32.45/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22661@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22662 list_del(&page->lru);
22663 }
22664
22665-#define UNSHARED_PTRS_PER_PGD \
22666- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22667+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22668+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22669
22670+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22671+{
22672+ while (count--)
22673+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22674+}
22675+#endif
22676+
22677+#ifdef CONFIG_PAX_PER_CPU_PGD
22678+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22679+{
22680+ while (count--)
22681+
22682+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22683+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22684+#else
22685+ *dst++ = *src++;
22686+#endif
22687+
22688+}
22689+#endif
22690+
22691+#ifdef CONFIG_X86_64
22692+#define pxd_t pud_t
22693+#define pyd_t pgd_t
22694+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22695+#define pxd_free(mm, pud) pud_free((mm), (pud))
22696+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22697+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22698+#define PYD_SIZE PGDIR_SIZE
22699+#else
22700+#define pxd_t pmd_t
22701+#define pyd_t pud_t
22702+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22703+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22704+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22705+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22706+#define PYD_SIZE PUD_SIZE
22707+#endif
22708+
22709+#ifdef CONFIG_PAX_PER_CPU_PGD
22710+static inline void pgd_ctor(pgd_t *pgd) {}
22711+static inline void pgd_dtor(pgd_t *pgd) {}
22712+#else
22713 static void pgd_ctor(pgd_t *pgd)
22714 {
22715 /* If the pgd points to a shared pagetable level (either the
22716@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22717 pgd_list_del(pgd);
22718 spin_unlock_irqrestore(&pgd_lock, flags);
22719 }
22720+#endif
22721
22722 /*
22723 * List of all pgd's needed for non-PAE so it can invalidate entries
22724@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22725 * -- wli
22726 */
22727
22728-#ifdef CONFIG_X86_PAE
22729+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22730 /*
22731 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22732 * updating the top-level pagetable entries to guarantee the
22733@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22734 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22735 * and initialize the kernel pmds here.
22736 */
22737-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22738+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22739
22740 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22741 {
22742@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22743 */
22744 flush_tlb_mm(mm);
22745 }
22746+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22747+#define PREALLOCATED_PXDS USER_PGD_PTRS
22748 #else /* !CONFIG_X86_PAE */
22749
22750 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22751-#define PREALLOCATED_PMDS 0
22752+#define PREALLOCATED_PXDS 0
22753
22754 #endif /* CONFIG_X86_PAE */
22755
22756-static void free_pmds(pmd_t *pmds[])
22757+static void free_pxds(pxd_t *pxds[])
22758 {
22759 int i;
22760
22761- for(i = 0; i < PREALLOCATED_PMDS; i++)
22762- if (pmds[i])
22763- free_page((unsigned long)pmds[i]);
22764+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22765+ if (pxds[i])
22766+ free_page((unsigned long)pxds[i]);
22767 }
22768
22769-static int preallocate_pmds(pmd_t *pmds[])
22770+static int preallocate_pxds(pxd_t *pxds[])
22771 {
22772 int i;
22773 bool failed = false;
22774
22775- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22776- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22777- if (pmd == NULL)
22778+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22779+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22780+ if (pxd == NULL)
22781 failed = true;
22782- pmds[i] = pmd;
22783+ pxds[i] = pxd;
22784 }
22785
22786 if (failed) {
22787- free_pmds(pmds);
22788+ free_pxds(pxds);
22789 return -ENOMEM;
22790 }
22791
22792@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22793 * preallocate which never got a corresponding vma will need to be
22794 * freed manually.
22795 */
22796-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22797+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22798 {
22799 int i;
22800
22801- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22802+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22803 pgd_t pgd = pgdp[i];
22804
22805 if (pgd_val(pgd) != 0) {
22806- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22807+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22808
22809- pgdp[i] = native_make_pgd(0);
22810+ set_pgd(pgdp + i, native_make_pgd(0));
22811
22812- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22813- pmd_free(mm, pmd);
22814+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22815+ pxd_free(mm, pxd);
22816 }
22817 }
22818 }
22819
22820-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22821+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22822 {
22823- pud_t *pud;
22824+ pyd_t *pyd;
22825 unsigned long addr;
22826 int i;
22827
22828- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22829+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22830 return;
22831
22832- pud = pud_offset(pgd, 0);
22833+#ifdef CONFIG_X86_64
22834+ pyd = pyd_offset(mm, 0L);
22835+#else
22836+ pyd = pyd_offset(pgd, 0L);
22837+#endif
22838
22839- for (addr = i = 0; i < PREALLOCATED_PMDS;
22840- i++, pud++, addr += PUD_SIZE) {
22841- pmd_t *pmd = pmds[i];
22842+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22843+ i++, pyd++, addr += PYD_SIZE) {
22844+ pxd_t *pxd = pxds[i];
22845
22846 if (i >= KERNEL_PGD_BOUNDARY)
22847- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22848- sizeof(pmd_t) * PTRS_PER_PMD);
22849+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22850+ sizeof(pxd_t) * PTRS_PER_PMD);
22851
22852- pud_populate(mm, pud, pmd);
22853+ pyd_populate(mm, pyd, pxd);
22854 }
22855 }
22856
22857 pgd_t *pgd_alloc(struct mm_struct *mm)
22858 {
22859 pgd_t *pgd;
22860- pmd_t *pmds[PREALLOCATED_PMDS];
22861+ pxd_t *pxds[PREALLOCATED_PXDS];
22862+
22863 unsigned long flags;
22864
22865 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22866@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22867
22868 mm->pgd = pgd;
22869
22870- if (preallocate_pmds(pmds) != 0)
22871+ if (preallocate_pxds(pxds) != 0)
22872 goto out_free_pgd;
22873
22874 if (paravirt_pgd_alloc(mm) != 0)
22875- goto out_free_pmds;
22876+ goto out_free_pxds;
22877
22878 /*
22879 * Make sure that pre-populating the pmds is atomic with
22880@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22881 spin_lock_irqsave(&pgd_lock, flags);
22882
22883 pgd_ctor(pgd);
22884- pgd_prepopulate_pmd(mm, pgd, pmds);
22885+ pgd_prepopulate_pxd(mm, pgd, pxds);
22886
22887 spin_unlock_irqrestore(&pgd_lock, flags);
22888
22889 return pgd;
22890
22891-out_free_pmds:
22892- free_pmds(pmds);
22893+out_free_pxds:
22894+ free_pxds(pxds);
22895 out_free_pgd:
22896 free_page((unsigned long)pgd);
22897 out:
22898@@ -287,7 +338,7 @@ out:
22899
22900 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22901 {
22902- pgd_mop_up_pmds(mm, pgd);
22903+ pgd_mop_up_pxds(mm, pgd);
22904 pgd_dtor(pgd);
22905 paravirt_pgd_free(mm, pgd);
22906 free_page((unsigned long)pgd);
22907diff -urNp linux-2.6.32.45/arch/x86/mm/setup_nx.c linux-2.6.32.45/arch/x86/mm/setup_nx.c
22908--- linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22909+++ linux-2.6.32.45/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22910@@ -4,11 +4,10 @@
22911
22912 #include <asm/pgtable.h>
22913
22914+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22915 int nx_enabled;
22916
22917-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22918-static int disable_nx __cpuinitdata;
22919-
22920+#ifndef CONFIG_PAX_PAGEEXEC
22921 /*
22922 * noexec = on|off
22923 *
22924@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22925 if (!str)
22926 return -EINVAL;
22927 if (!strncmp(str, "on", 2)) {
22928- __supported_pte_mask |= _PAGE_NX;
22929- disable_nx = 0;
22930+ nx_enabled = 1;
22931 } else if (!strncmp(str, "off", 3)) {
22932- disable_nx = 1;
22933- __supported_pte_mask &= ~_PAGE_NX;
22934+ nx_enabled = 0;
22935 }
22936 return 0;
22937 }
22938 early_param("noexec", noexec_setup);
22939 #endif
22940+#endif
22941
22942 #ifdef CONFIG_X86_PAE
22943 void __init set_nx(void)
22944 {
22945- unsigned int v[4], l, h;
22946+ if (!nx_enabled && cpu_has_nx) {
22947+ unsigned l, h;
22948
22949- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22950- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22951-
22952- if ((v[3] & (1 << 20)) && !disable_nx) {
22953- rdmsr(MSR_EFER, l, h);
22954- l |= EFER_NX;
22955- wrmsr(MSR_EFER, l, h);
22956- nx_enabled = 1;
22957- __supported_pte_mask |= _PAGE_NX;
22958- }
22959+ __supported_pte_mask &= ~_PAGE_NX;
22960+ rdmsr(MSR_EFER, l, h);
22961+ l &= ~EFER_NX;
22962+ wrmsr(MSR_EFER, l, h);
22963 }
22964 }
22965 #else
22966@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22967 unsigned long efer;
22968
22969 rdmsrl(MSR_EFER, efer);
22970- if (!(efer & EFER_NX) || disable_nx)
22971+ if (!(efer & EFER_NX) || !nx_enabled)
22972 __supported_pte_mask &= ~_PAGE_NX;
22973 }
22974 #endif
22975diff -urNp linux-2.6.32.45/arch/x86/mm/tlb.c linux-2.6.32.45/arch/x86/mm/tlb.c
22976--- linux-2.6.32.45/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22977+++ linux-2.6.32.45/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22978@@ -61,7 +61,11 @@ void leave_mm(int cpu)
22979 BUG();
22980 cpumask_clear_cpu(cpu,
22981 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22982+
22983+#ifndef CONFIG_PAX_PER_CPU_PGD
22984 load_cr3(swapper_pg_dir);
22985+#endif
22986+
22987 }
22988 EXPORT_SYMBOL_GPL(leave_mm);
22989
22990diff -urNp linux-2.6.32.45/arch/x86/oprofile/backtrace.c linux-2.6.32.45/arch/x86/oprofile/backtrace.c
22991--- linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22992+++ linux-2.6.32.45/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22993@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22994 struct frame_head bufhead[2];
22995
22996 /* Also check accessibility of one struct frame_head beyond */
22997- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22998+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22999 return NULL;
23000 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
23001 return NULL;
23002@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
23003 {
23004 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
23005
23006- if (!user_mode_vm(regs)) {
23007+ if (!user_mode(regs)) {
23008 unsigned long stack = kernel_stack_pointer(regs);
23009 if (depth)
23010 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23011diff -urNp linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c
23012--- linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
23013+++ linux-2.6.32.45/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
23014@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
23015 #endif
23016 }
23017
23018-static int inline addr_increment(void)
23019+static inline int addr_increment(void)
23020 {
23021 #ifdef CONFIG_SMP
23022 return smp_num_siblings == 2 ? 2 : 1;
23023diff -urNp linux-2.6.32.45/arch/x86/pci/common.c linux-2.6.32.45/arch/x86/pci/common.c
23024--- linux-2.6.32.45/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
23025+++ linux-2.6.32.45/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
23026@@ -31,8 +31,8 @@ int noioapicreroute = 1;
23027 int pcibios_last_bus = -1;
23028 unsigned long pirq_table_addr;
23029 struct pci_bus *pci_root_bus;
23030-struct pci_raw_ops *raw_pci_ops;
23031-struct pci_raw_ops *raw_pci_ext_ops;
23032+const struct pci_raw_ops *raw_pci_ops;
23033+const struct pci_raw_ops *raw_pci_ext_ops;
23034
23035 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
23036 int reg, int len, u32 *val)
23037diff -urNp linux-2.6.32.45/arch/x86/pci/direct.c linux-2.6.32.45/arch/x86/pci/direct.c
23038--- linux-2.6.32.45/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
23039+++ linux-2.6.32.45/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
23040@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
23041
23042 #undef PCI_CONF1_ADDRESS
23043
23044-struct pci_raw_ops pci_direct_conf1 = {
23045+const struct pci_raw_ops pci_direct_conf1 = {
23046 .read = pci_conf1_read,
23047 .write = pci_conf1_write,
23048 };
23049@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
23050
23051 #undef PCI_CONF2_ADDRESS
23052
23053-struct pci_raw_ops pci_direct_conf2 = {
23054+const struct pci_raw_ops pci_direct_conf2 = {
23055 .read = pci_conf2_read,
23056 .write = pci_conf2_write,
23057 };
23058@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
23059 * This should be close to trivial, but it isn't, because there are buggy
23060 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
23061 */
23062-static int __init pci_sanity_check(struct pci_raw_ops *o)
23063+static int __init pci_sanity_check(const struct pci_raw_ops *o)
23064 {
23065 u32 x = 0;
23066 int year, devfn;
23067diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_32.c linux-2.6.32.45/arch/x86/pci/mmconfig_32.c
23068--- linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
23069+++ linux-2.6.32.45/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
23070@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
23071 return 0;
23072 }
23073
23074-static struct pci_raw_ops pci_mmcfg = {
23075+static const struct pci_raw_ops pci_mmcfg = {
23076 .read = pci_mmcfg_read,
23077 .write = pci_mmcfg_write,
23078 };
23079diff -urNp linux-2.6.32.45/arch/x86/pci/mmconfig_64.c linux-2.6.32.45/arch/x86/pci/mmconfig_64.c
23080--- linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
23081+++ linux-2.6.32.45/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
23082@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
23083 return 0;
23084 }
23085
23086-static struct pci_raw_ops pci_mmcfg = {
23087+static const struct pci_raw_ops pci_mmcfg = {
23088 .read = pci_mmcfg_read,
23089 .write = pci_mmcfg_write,
23090 };
23091diff -urNp linux-2.6.32.45/arch/x86/pci/numaq_32.c linux-2.6.32.45/arch/x86/pci/numaq_32.c
23092--- linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
23093+++ linux-2.6.32.45/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
23094@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
23095
23096 #undef PCI_CONF1_MQ_ADDRESS
23097
23098-static struct pci_raw_ops pci_direct_conf1_mq = {
23099+static const struct pci_raw_ops pci_direct_conf1_mq = {
23100 .read = pci_conf1_mq_read,
23101 .write = pci_conf1_mq_write
23102 };
23103diff -urNp linux-2.6.32.45/arch/x86/pci/olpc.c linux-2.6.32.45/arch/x86/pci/olpc.c
23104--- linux-2.6.32.45/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
23105+++ linux-2.6.32.45/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
23106@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
23107 return 0;
23108 }
23109
23110-static struct pci_raw_ops pci_olpc_conf = {
23111+static const struct pci_raw_ops pci_olpc_conf = {
23112 .read = pci_olpc_read,
23113 .write = pci_olpc_write,
23114 };
23115diff -urNp linux-2.6.32.45/arch/x86/pci/pcbios.c linux-2.6.32.45/arch/x86/pci/pcbios.c
23116--- linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
23117+++ linux-2.6.32.45/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
23118@@ -56,50 +56,93 @@ union bios32 {
23119 static struct {
23120 unsigned long address;
23121 unsigned short segment;
23122-} bios32_indirect = { 0, __KERNEL_CS };
23123+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23124
23125 /*
23126 * Returns the entry point for the given service, NULL on error
23127 */
23128
23129-static unsigned long bios32_service(unsigned long service)
23130+static unsigned long __devinit bios32_service(unsigned long service)
23131 {
23132 unsigned char return_code; /* %al */
23133 unsigned long address; /* %ebx */
23134 unsigned long length; /* %ecx */
23135 unsigned long entry; /* %edx */
23136 unsigned long flags;
23137+ struct desc_struct d, *gdt;
23138
23139 local_irq_save(flags);
23140- __asm__("lcall *(%%edi); cld"
23141+
23142+ gdt = get_cpu_gdt_table(smp_processor_id());
23143+
23144+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23145+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23146+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23147+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23148+
23149+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23150 : "=a" (return_code),
23151 "=b" (address),
23152 "=c" (length),
23153 "=d" (entry)
23154 : "0" (service),
23155 "1" (0),
23156- "D" (&bios32_indirect));
23157+ "D" (&bios32_indirect),
23158+ "r"(__PCIBIOS_DS)
23159+ : "memory");
23160+
23161+ pax_open_kernel();
23162+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23163+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23164+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23165+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23166+ pax_close_kernel();
23167+
23168 local_irq_restore(flags);
23169
23170 switch (return_code) {
23171- case 0:
23172- return address + entry;
23173- case 0x80: /* Not present */
23174- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23175- return 0;
23176- default: /* Shouldn't happen */
23177- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23178- service, return_code);
23179+ case 0: {
23180+ int cpu;
23181+ unsigned char flags;
23182+
23183+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23184+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23185+ printk(KERN_WARNING "bios32_service: not valid\n");
23186 return 0;
23187+ }
23188+ address = address + PAGE_OFFSET;
23189+ length += 16UL; /* some BIOSs underreport this... */
23190+ flags = 4;
23191+ if (length >= 64*1024*1024) {
23192+ length >>= PAGE_SHIFT;
23193+ flags |= 8;
23194+ }
23195+
23196+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23197+ gdt = get_cpu_gdt_table(cpu);
23198+ pack_descriptor(&d, address, length, 0x9b, flags);
23199+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23200+ pack_descriptor(&d, address, length, 0x93, flags);
23201+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23202+ }
23203+ return entry;
23204+ }
23205+ case 0x80: /* Not present */
23206+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23207+ return 0;
23208+ default: /* Shouldn't happen */
23209+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23210+ service, return_code);
23211+ return 0;
23212 }
23213 }
23214
23215 static struct {
23216 unsigned long address;
23217 unsigned short segment;
23218-} pci_indirect = { 0, __KERNEL_CS };
23219+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23220
23221-static int pci_bios_present;
23222+static int pci_bios_present __read_only;
23223
23224 static int __devinit check_pcibios(void)
23225 {
23226@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23227 unsigned long flags, pcibios_entry;
23228
23229 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23230- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23231+ pci_indirect.address = pcibios_entry;
23232
23233 local_irq_save(flags);
23234- __asm__(
23235- "lcall *(%%edi); cld\n\t"
23236+ __asm__("movw %w6, %%ds\n\t"
23237+ "lcall *%%ss:(%%edi); cld\n\t"
23238+ "push %%ss\n\t"
23239+ "pop %%ds\n\t"
23240 "jc 1f\n\t"
23241 "xor %%ah, %%ah\n"
23242 "1:"
23243@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23244 "=b" (ebx),
23245 "=c" (ecx)
23246 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23247- "D" (&pci_indirect)
23248+ "D" (&pci_indirect),
23249+ "r" (__PCIBIOS_DS)
23250 : "memory");
23251 local_irq_restore(flags);
23252
23253@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23254
23255 switch (len) {
23256 case 1:
23257- __asm__("lcall *(%%esi); cld\n\t"
23258+ __asm__("movw %w6, %%ds\n\t"
23259+ "lcall *%%ss:(%%esi); cld\n\t"
23260+ "push %%ss\n\t"
23261+ "pop %%ds\n\t"
23262 "jc 1f\n\t"
23263 "xor %%ah, %%ah\n"
23264 "1:"
23265@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23266 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23267 "b" (bx),
23268 "D" ((long)reg),
23269- "S" (&pci_indirect));
23270+ "S" (&pci_indirect),
23271+ "r" (__PCIBIOS_DS));
23272 /*
23273 * Zero-extend the result beyond 8 bits, do not trust the
23274 * BIOS having done it:
23275@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23276 *value &= 0xff;
23277 break;
23278 case 2:
23279- __asm__("lcall *(%%esi); cld\n\t"
23280+ __asm__("movw %w6, %%ds\n\t"
23281+ "lcall *%%ss:(%%esi); cld\n\t"
23282+ "push %%ss\n\t"
23283+ "pop %%ds\n\t"
23284 "jc 1f\n\t"
23285 "xor %%ah, %%ah\n"
23286 "1:"
23287@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23288 : "1" (PCIBIOS_READ_CONFIG_WORD),
23289 "b" (bx),
23290 "D" ((long)reg),
23291- "S" (&pci_indirect));
23292+ "S" (&pci_indirect),
23293+ "r" (__PCIBIOS_DS));
23294 /*
23295 * Zero-extend the result beyond 16 bits, do not trust the
23296 * BIOS having done it:
23297@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23298 *value &= 0xffff;
23299 break;
23300 case 4:
23301- __asm__("lcall *(%%esi); cld\n\t"
23302+ __asm__("movw %w6, %%ds\n\t"
23303+ "lcall *%%ss:(%%esi); cld\n\t"
23304+ "push %%ss\n\t"
23305+ "pop %%ds\n\t"
23306 "jc 1f\n\t"
23307 "xor %%ah, %%ah\n"
23308 "1:"
23309@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23310 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23311 "b" (bx),
23312 "D" ((long)reg),
23313- "S" (&pci_indirect));
23314+ "S" (&pci_indirect),
23315+ "r" (__PCIBIOS_DS));
23316 break;
23317 }
23318
23319@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23320
23321 switch (len) {
23322 case 1:
23323- __asm__("lcall *(%%esi); cld\n\t"
23324+ __asm__("movw %w6, %%ds\n\t"
23325+ "lcall *%%ss:(%%esi); cld\n\t"
23326+ "push %%ss\n\t"
23327+ "pop %%ds\n\t"
23328 "jc 1f\n\t"
23329 "xor %%ah, %%ah\n"
23330 "1:"
23331@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23332 "c" (value),
23333 "b" (bx),
23334 "D" ((long)reg),
23335- "S" (&pci_indirect));
23336+ "S" (&pci_indirect),
23337+ "r" (__PCIBIOS_DS));
23338 break;
23339 case 2:
23340- __asm__("lcall *(%%esi); cld\n\t"
23341+ __asm__("movw %w6, %%ds\n\t"
23342+ "lcall *%%ss:(%%esi); cld\n\t"
23343+ "push %%ss\n\t"
23344+ "pop %%ds\n\t"
23345 "jc 1f\n\t"
23346 "xor %%ah, %%ah\n"
23347 "1:"
23348@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23349 "c" (value),
23350 "b" (bx),
23351 "D" ((long)reg),
23352- "S" (&pci_indirect));
23353+ "S" (&pci_indirect),
23354+ "r" (__PCIBIOS_DS));
23355 break;
23356 case 4:
23357- __asm__("lcall *(%%esi); cld\n\t"
23358+ __asm__("movw %w6, %%ds\n\t"
23359+ "lcall *%%ss:(%%esi); cld\n\t"
23360+ "push %%ss\n\t"
23361+ "pop %%ds\n\t"
23362 "jc 1f\n\t"
23363 "xor %%ah, %%ah\n"
23364 "1:"
23365@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23366 "c" (value),
23367 "b" (bx),
23368 "D" ((long)reg),
23369- "S" (&pci_indirect));
23370+ "S" (&pci_indirect),
23371+ "r" (__PCIBIOS_DS));
23372 break;
23373 }
23374
23375@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23376 * Function table for BIOS32 access
23377 */
23378
23379-static struct pci_raw_ops pci_bios_access = {
23380+static const struct pci_raw_ops pci_bios_access = {
23381 .read = pci_bios_read,
23382 .write = pci_bios_write
23383 };
23384@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23385 * Try to find PCI BIOS.
23386 */
23387
23388-static struct pci_raw_ops * __devinit pci_find_bios(void)
23389+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23390 {
23391 union bios32 *check;
23392 unsigned char sum;
23393@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23394
23395 DBG("PCI: Fetching IRQ routing table... ");
23396 __asm__("push %%es\n\t"
23397+ "movw %w8, %%ds\n\t"
23398 "push %%ds\n\t"
23399 "pop %%es\n\t"
23400- "lcall *(%%esi); cld\n\t"
23401+ "lcall *%%ss:(%%esi); cld\n\t"
23402 "pop %%es\n\t"
23403+ "push %%ss\n\t"
23404+ "pop %%ds\n"
23405 "jc 1f\n\t"
23406 "xor %%ah, %%ah\n"
23407 "1:"
23408@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23409 "1" (0),
23410 "D" ((long) &opt),
23411 "S" (&pci_indirect),
23412- "m" (opt)
23413+ "m" (opt),
23414+ "r" (__PCIBIOS_DS)
23415 : "memory");
23416 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23417 if (ret & 0xff00)
23418@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23419 {
23420 int ret;
23421
23422- __asm__("lcall *(%%esi); cld\n\t"
23423+ __asm__("movw %w5, %%ds\n\t"
23424+ "lcall *%%ss:(%%esi); cld\n\t"
23425+ "push %%ss\n\t"
23426+ "pop %%ds\n"
23427 "jc 1f\n\t"
23428 "xor %%ah, %%ah\n"
23429 "1:"
23430@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23431 : "0" (PCIBIOS_SET_PCI_HW_INT),
23432 "b" ((dev->bus->number << 8) | dev->devfn),
23433 "c" ((irq << 8) | (pin + 10)),
23434- "S" (&pci_indirect));
23435+ "S" (&pci_indirect),
23436+ "r" (__PCIBIOS_DS));
23437 return !(ret & 0xff00);
23438 }
23439 EXPORT_SYMBOL(pcibios_set_irq_routing);
23440diff -urNp linux-2.6.32.45/arch/x86/power/cpu.c linux-2.6.32.45/arch/x86/power/cpu.c
23441--- linux-2.6.32.45/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23442+++ linux-2.6.32.45/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23443@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23444 static void fix_processor_context(void)
23445 {
23446 int cpu = smp_processor_id();
23447- struct tss_struct *t = &per_cpu(init_tss, cpu);
23448+ struct tss_struct *t = init_tss + cpu;
23449
23450 set_tss_desc(cpu, t); /*
23451 * This just modifies memory; should not be
23452@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23453 */
23454
23455 #ifdef CONFIG_X86_64
23456+ pax_open_kernel();
23457 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23458+ pax_close_kernel();
23459
23460 syscall_init(); /* This sets MSR_*STAR and related */
23461 #endif
23462diff -urNp linux-2.6.32.45/arch/x86/vdso/Makefile linux-2.6.32.45/arch/x86/vdso/Makefile
23463--- linux-2.6.32.45/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23464+++ linux-2.6.32.45/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23465@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23466 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23467 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23468
23469-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23470+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23471 GCOV_PROFILE := n
23472
23473 #
23474diff -urNp linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c
23475--- linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23476+++ linux-2.6.32.45/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23477@@ -22,24 +22,48 @@
23478 #include <asm/hpet.h>
23479 #include <asm/unistd.h>
23480 #include <asm/io.h>
23481+#include <asm/fixmap.h>
23482 #include "vextern.h"
23483
23484 #define gtod vdso_vsyscall_gtod_data
23485
23486+notrace noinline long __vdso_fallback_time(long *t)
23487+{
23488+ long secs;
23489+ asm volatile("syscall"
23490+ : "=a" (secs)
23491+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23492+ return secs;
23493+}
23494+
23495 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23496 {
23497 long ret;
23498 asm("syscall" : "=a" (ret) :
23499- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23500+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23501 return ret;
23502 }
23503
23504+notrace static inline cycle_t __vdso_vread_hpet(void)
23505+{
23506+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23507+}
23508+
23509+notrace static inline cycle_t __vdso_vread_tsc(void)
23510+{
23511+ cycle_t ret = (cycle_t)vget_cycles();
23512+
23513+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23514+}
23515+
23516 notrace static inline long vgetns(void)
23517 {
23518 long v;
23519- cycles_t (*vread)(void);
23520- vread = gtod->clock.vread;
23521- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23522+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23523+ v = __vdso_vread_tsc();
23524+ else
23525+ v = __vdso_vread_hpet();
23526+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23527 return (v * gtod->clock.mult) >> gtod->clock.shift;
23528 }
23529
23530@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23531
23532 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23533 {
23534- if (likely(gtod->sysctl_enabled))
23535+ if (likely(gtod->sysctl_enabled &&
23536+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23537+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23538 switch (clock) {
23539 case CLOCK_REALTIME:
23540 if (likely(gtod->clock.vread))
23541@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23542 int clock_gettime(clockid_t, struct timespec *)
23543 __attribute__((weak, alias("__vdso_clock_gettime")));
23544
23545-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23546+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23547 {
23548 long ret;
23549- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23550+ asm("syscall" : "=a" (ret) :
23551+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23552+ return ret;
23553+}
23554+
23555+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23556+{
23557+ if (likely(gtod->sysctl_enabled &&
23558+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23559+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23560+ {
23561 if (likely(tv != NULL)) {
23562 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23563 offsetof(struct timespec, tv_nsec) ||
23564@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23565 }
23566 return 0;
23567 }
23568- asm("syscall" : "=a" (ret) :
23569- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23570- return ret;
23571+ return __vdso_fallback_gettimeofday(tv, tz);
23572 }
23573 int gettimeofday(struct timeval *, struct timezone *)
23574 __attribute__((weak, alias("__vdso_gettimeofday")));
23575diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c
23576--- linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23577+++ linux-2.6.32.45/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23578@@ -25,6 +25,7 @@
23579 #include <asm/tlbflush.h>
23580 #include <asm/vdso.h>
23581 #include <asm/proto.h>
23582+#include <asm/mman.h>
23583
23584 enum {
23585 VDSO_DISABLED = 0,
23586@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23587 void enable_sep_cpu(void)
23588 {
23589 int cpu = get_cpu();
23590- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23591+ struct tss_struct *tss = init_tss + cpu;
23592
23593 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23594 put_cpu();
23595@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23596 gate_vma.vm_start = FIXADDR_USER_START;
23597 gate_vma.vm_end = FIXADDR_USER_END;
23598 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23599- gate_vma.vm_page_prot = __P101;
23600+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23601 /*
23602 * Make sure the vDSO gets into every core dump.
23603 * Dumping its contents makes post-mortem fully interpretable later
23604@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23605 if (compat)
23606 addr = VDSO_HIGH_BASE;
23607 else {
23608- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23609+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23610 if (IS_ERR_VALUE(addr)) {
23611 ret = addr;
23612 goto up_fail;
23613 }
23614 }
23615
23616- current->mm->context.vdso = (void *)addr;
23617+ current->mm->context.vdso = addr;
23618
23619 if (compat_uses_vma || !compat) {
23620 /*
23621@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23622 }
23623
23624 current_thread_info()->sysenter_return =
23625- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23626+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23627
23628 up_fail:
23629 if (ret)
23630- current->mm->context.vdso = NULL;
23631+ current->mm->context.vdso = 0;
23632
23633 up_write(&mm->mmap_sem);
23634
23635@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23636
23637 const char *arch_vma_name(struct vm_area_struct *vma)
23638 {
23639- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23640+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23641 return "[vdso]";
23642+
23643+#ifdef CONFIG_PAX_SEGMEXEC
23644+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23645+ return "[vdso]";
23646+#endif
23647+
23648 return NULL;
23649 }
23650
23651@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23652 struct mm_struct *mm = tsk->mm;
23653
23654 /* Check to see if this task was created in compat vdso mode */
23655- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23656+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23657 return &gate_vma;
23658 return NULL;
23659 }
23660diff -urNp linux-2.6.32.45/arch/x86/vdso/vdso.lds.S linux-2.6.32.45/arch/x86/vdso/vdso.lds.S
23661--- linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23662+++ linux-2.6.32.45/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23663@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23664 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23665 #include "vextern.h"
23666 #undef VEXTERN
23667+
23668+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23669+VEXTERN(fallback_gettimeofday)
23670+VEXTERN(fallback_time)
23671+VEXTERN(getcpu)
23672+#undef VEXTERN
23673diff -urNp linux-2.6.32.45/arch/x86/vdso/vextern.h linux-2.6.32.45/arch/x86/vdso/vextern.h
23674--- linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23675+++ linux-2.6.32.45/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23676@@ -11,6 +11,5 @@
23677 put into vextern.h and be referenced as a pointer with vdso prefix.
23678 The main kernel later fills in the values. */
23679
23680-VEXTERN(jiffies)
23681 VEXTERN(vgetcpu_mode)
23682 VEXTERN(vsyscall_gtod_data)
23683diff -urNp linux-2.6.32.45/arch/x86/vdso/vma.c linux-2.6.32.45/arch/x86/vdso/vma.c
23684--- linux-2.6.32.45/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23685+++ linux-2.6.32.45/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23686@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23687 if (!vbase)
23688 goto oom;
23689
23690- if (memcmp(vbase, "\177ELF", 4)) {
23691+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
23692 printk("VDSO: I'm broken; not ELF\n");
23693 vdso_enabled = 0;
23694 }
23695@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23696 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23697 #include "vextern.h"
23698 #undef VEXTERN
23699+ vunmap(vbase);
23700 return 0;
23701
23702 oom:
23703@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23704 goto up_fail;
23705 }
23706
23707- current->mm->context.vdso = (void *)addr;
23708+ current->mm->context.vdso = addr;
23709
23710 ret = install_special_mapping(mm, addr, vdso_size,
23711 VM_READ|VM_EXEC|
23712@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23713 VM_ALWAYSDUMP,
23714 vdso_pages);
23715 if (ret) {
23716- current->mm->context.vdso = NULL;
23717+ current->mm->context.vdso = 0;
23718 goto up_fail;
23719 }
23720
23721@@ -132,10 +133,3 @@ up_fail:
23722 up_write(&mm->mmap_sem);
23723 return ret;
23724 }
23725-
23726-static __init int vdso_setup(char *s)
23727-{
23728- vdso_enabled = simple_strtoul(s, NULL, 0);
23729- return 0;
23730-}
23731-__setup("vdso=", vdso_setup);
23732diff -urNp linux-2.6.32.45/arch/x86/xen/enlighten.c linux-2.6.32.45/arch/x86/xen/enlighten.c
23733--- linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23734+++ linux-2.6.32.45/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23735@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23736
23737 struct shared_info xen_dummy_shared_info;
23738
23739-void *xen_initial_gdt;
23740-
23741 /*
23742 * Point at some empty memory to start with. We map the real shared_info
23743 * page as soon as fixmap is up and running.
23744@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23745
23746 preempt_disable();
23747
23748- start = __get_cpu_var(idt_desc).address;
23749+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23750 end = start + __get_cpu_var(idt_desc).size + 1;
23751
23752 xen_mc_flush();
23753@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23754 #endif
23755 };
23756
23757-static void xen_reboot(int reason)
23758+static __noreturn void xen_reboot(int reason)
23759 {
23760 struct sched_shutdown r = { .reason = reason };
23761
23762@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23763 BUG();
23764 }
23765
23766-static void xen_restart(char *msg)
23767+static __noreturn void xen_restart(char *msg)
23768 {
23769 xen_reboot(SHUTDOWN_reboot);
23770 }
23771
23772-static void xen_emergency_restart(void)
23773+static __noreturn void xen_emergency_restart(void)
23774 {
23775 xen_reboot(SHUTDOWN_reboot);
23776 }
23777
23778-static void xen_machine_halt(void)
23779+static __noreturn void xen_machine_halt(void)
23780 {
23781 xen_reboot(SHUTDOWN_poweroff);
23782 }
23783@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23784 */
23785 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23786
23787-#ifdef CONFIG_X86_64
23788 /* Work out if we support NX */
23789- check_efer();
23790+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23791+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23792+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23793+ unsigned l, h;
23794+
23795+#ifdef CONFIG_X86_PAE
23796+ nx_enabled = 1;
23797+#endif
23798+ __supported_pte_mask |= _PAGE_NX;
23799+ rdmsr(MSR_EFER, l, h);
23800+ l |= EFER_NX;
23801+ wrmsr(MSR_EFER, l, h);
23802+ }
23803 #endif
23804
23805 xen_setup_features();
23806@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23807
23808 machine_ops = xen_machine_ops;
23809
23810- /*
23811- * The only reliable way to retain the initial address of the
23812- * percpu gdt_page is to remember it here, so we can go and
23813- * mark it RW later, when the initial percpu area is freed.
23814- */
23815- xen_initial_gdt = &per_cpu(gdt_page, 0);
23816-
23817 xen_smp_init();
23818
23819 pgd = (pgd_t *)xen_start_info->pt_base;
23820diff -urNp linux-2.6.32.45/arch/x86/xen/mmu.c linux-2.6.32.45/arch/x86/xen/mmu.c
23821--- linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23822+++ linux-2.6.32.45/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23823@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23824 convert_pfn_mfn(init_level4_pgt);
23825 convert_pfn_mfn(level3_ident_pgt);
23826 convert_pfn_mfn(level3_kernel_pgt);
23827+ convert_pfn_mfn(level3_vmalloc_pgt);
23828+ convert_pfn_mfn(level3_vmemmap_pgt);
23829
23830 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23831 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23832@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23833 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23834 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23835 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23836+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23837+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23838 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23839+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23840 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23841 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23842
23843diff -urNp linux-2.6.32.45/arch/x86/xen/smp.c linux-2.6.32.45/arch/x86/xen/smp.c
23844--- linux-2.6.32.45/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23845+++ linux-2.6.32.45/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23846@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23847 {
23848 BUG_ON(smp_processor_id() != 0);
23849 native_smp_prepare_boot_cpu();
23850-
23851- /* We've switched to the "real" per-cpu gdt, so make sure the
23852- old memory can be recycled */
23853- make_lowmem_page_readwrite(xen_initial_gdt);
23854-
23855 xen_setup_vcpu_info_placement();
23856 }
23857
23858@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23859 gdt = get_cpu_gdt_table(cpu);
23860
23861 ctxt->flags = VGCF_IN_KERNEL;
23862- ctxt->user_regs.ds = __USER_DS;
23863- ctxt->user_regs.es = __USER_DS;
23864+ ctxt->user_regs.ds = __KERNEL_DS;
23865+ ctxt->user_regs.es = __KERNEL_DS;
23866 ctxt->user_regs.ss = __KERNEL_DS;
23867 #ifdef CONFIG_X86_32
23868 ctxt->user_regs.fs = __KERNEL_PERCPU;
23869- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23870+ savesegment(gs, ctxt->user_regs.gs);
23871 #else
23872 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23873 #endif
23874@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23875 int rc;
23876
23877 per_cpu(current_task, cpu) = idle;
23878+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23879 #ifdef CONFIG_X86_32
23880 irq_ctx_init(cpu);
23881 #else
23882 clear_tsk_thread_flag(idle, TIF_FORK);
23883- per_cpu(kernel_stack, cpu) =
23884- (unsigned long)task_stack_page(idle) -
23885- KERNEL_STACK_OFFSET + THREAD_SIZE;
23886+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23887 #endif
23888 xen_setup_runstate_info(cpu);
23889 xen_setup_timer(cpu);
23890diff -urNp linux-2.6.32.45/arch/x86/xen/xen-asm_32.S linux-2.6.32.45/arch/x86/xen/xen-asm_32.S
23891--- linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23892+++ linux-2.6.32.45/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23893@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23894 ESP_OFFSET=4 # bytes pushed onto stack
23895
23896 /*
23897- * Store vcpu_info pointer for easy access. Do it this way to
23898- * avoid having to reload %fs
23899+ * Store vcpu_info pointer for easy access.
23900 */
23901 #ifdef CONFIG_SMP
23902- GET_THREAD_INFO(%eax)
23903- movl TI_cpu(%eax), %eax
23904- movl __per_cpu_offset(,%eax,4), %eax
23905- mov per_cpu__xen_vcpu(%eax), %eax
23906+ push %fs
23907+ mov $(__KERNEL_PERCPU), %eax
23908+ mov %eax, %fs
23909+ mov PER_CPU_VAR(xen_vcpu), %eax
23910+ pop %fs
23911 #else
23912 movl per_cpu__xen_vcpu, %eax
23913 #endif
23914diff -urNp linux-2.6.32.45/arch/x86/xen/xen-head.S linux-2.6.32.45/arch/x86/xen/xen-head.S
23915--- linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23916+++ linux-2.6.32.45/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23917@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23918 #ifdef CONFIG_X86_32
23919 mov %esi,xen_start_info
23920 mov $init_thread_union+THREAD_SIZE,%esp
23921+#ifdef CONFIG_SMP
23922+ movl $cpu_gdt_table,%edi
23923+ movl $__per_cpu_load,%eax
23924+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23925+ rorl $16,%eax
23926+ movb %al,__KERNEL_PERCPU + 4(%edi)
23927+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23928+ movl $__per_cpu_end - 1,%eax
23929+ subl $__per_cpu_start,%eax
23930+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23931+#endif
23932 #else
23933 mov %rsi,xen_start_info
23934 mov $init_thread_union+THREAD_SIZE,%rsp
23935diff -urNp linux-2.6.32.45/arch/x86/xen/xen-ops.h linux-2.6.32.45/arch/x86/xen/xen-ops.h
23936--- linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23937+++ linux-2.6.32.45/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23938@@ -10,8 +10,6 @@
23939 extern const char xen_hypervisor_callback[];
23940 extern const char xen_failsafe_callback[];
23941
23942-extern void *xen_initial_gdt;
23943-
23944 struct trap_info;
23945 void xen_copy_trap_info(struct trap_info *traps);
23946
23947diff -urNp linux-2.6.32.45/block/blk-integrity.c linux-2.6.32.45/block/blk-integrity.c
23948--- linux-2.6.32.45/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23949+++ linux-2.6.32.45/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23950@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23951 NULL,
23952 };
23953
23954-static struct sysfs_ops integrity_ops = {
23955+static const struct sysfs_ops integrity_ops = {
23956 .show = &integrity_attr_show,
23957 .store = &integrity_attr_store,
23958 };
23959diff -urNp linux-2.6.32.45/block/blk-iopoll.c linux-2.6.32.45/block/blk-iopoll.c
23960--- linux-2.6.32.45/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23961+++ linux-2.6.32.45/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23962@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23963 }
23964 EXPORT_SYMBOL(blk_iopoll_complete);
23965
23966-static void blk_iopoll_softirq(struct softirq_action *h)
23967+static void blk_iopoll_softirq(void)
23968 {
23969 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23970 int rearm = 0, budget = blk_iopoll_budget;
23971diff -urNp linux-2.6.32.45/block/blk-map.c linux-2.6.32.45/block/blk-map.c
23972--- linux-2.6.32.45/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23973+++ linux-2.6.32.45/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23974@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23975 * direct dma. else, set up kernel bounce buffers
23976 */
23977 uaddr = (unsigned long) ubuf;
23978- if (blk_rq_aligned(q, ubuf, len) && !map_data)
23979+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23980 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23981 else
23982 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23983@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23984 for (i = 0; i < iov_count; i++) {
23985 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23986
23987+ if (!iov[i].iov_len)
23988+ return -EINVAL;
23989+
23990 if (uaddr & queue_dma_alignment(q)) {
23991 unaligned = 1;
23992 break;
23993 }
23994- if (!iov[i].iov_len)
23995- return -EINVAL;
23996 }
23997
23998 if (unaligned || (q->dma_pad_mask & len) || map_data)
23999@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
24000 if (!len || !kbuf)
24001 return -EINVAL;
24002
24003- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
24004+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
24005 if (do_copy)
24006 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24007 else
24008diff -urNp linux-2.6.32.45/block/blk-softirq.c linux-2.6.32.45/block/blk-softirq.c
24009--- linux-2.6.32.45/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
24010+++ linux-2.6.32.45/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
24011@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
24012 * Softirq action handler - move entries to local list and loop over them
24013 * while passing them to the queue registered handler.
24014 */
24015-static void blk_done_softirq(struct softirq_action *h)
24016+static void blk_done_softirq(void)
24017 {
24018 struct list_head *cpu_list, local_list;
24019
24020diff -urNp linux-2.6.32.45/block/blk-sysfs.c linux-2.6.32.45/block/blk-sysfs.c
24021--- linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
24022+++ linux-2.6.32.45/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
24023@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
24024 kmem_cache_free(blk_requestq_cachep, q);
24025 }
24026
24027-static struct sysfs_ops queue_sysfs_ops = {
24028+static const struct sysfs_ops queue_sysfs_ops = {
24029 .show = queue_attr_show,
24030 .store = queue_attr_store,
24031 };
24032diff -urNp linux-2.6.32.45/block/bsg.c linux-2.6.32.45/block/bsg.c
24033--- linux-2.6.32.45/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
24034+++ linux-2.6.32.45/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
24035@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
24036 struct sg_io_v4 *hdr, struct bsg_device *bd,
24037 fmode_t has_write_perm)
24038 {
24039+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24040+ unsigned char *cmdptr;
24041+
24042 if (hdr->request_len > BLK_MAX_CDB) {
24043 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24044 if (!rq->cmd)
24045 return -ENOMEM;
24046- }
24047+ cmdptr = rq->cmd;
24048+ } else
24049+ cmdptr = tmpcmd;
24050
24051- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
24052+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
24053 hdr->request_len))
24054 return -EFAULT;
24055
24056+ if (cmdptr != rq->cmd)
24057+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24058+
24059 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24060 if (blk_verify_command(rq->cmd, has_write_perm))
24061 return -EPERM;
24062diff -urNp linux-2.6.32.45/block/elevator.c linux-2.6.32.45/block/elevator.c
24063--- linux-2.6.32.45/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
24064+++ linux-2.6.32.45/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
24065@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
24066 return error;
24067 }
24068
24069-static struct sysfs_ops elv_sysfs_ops = {
24070+static const struct sysfs_ops elv_sysfs_ops = {
24071 .show = elv_attr_show,
24072 .store = elv_attr_store,
24073 };
24074diff -urNp linux-2.6.32.45/block/scsi_ioctl.c linux-2.6.32.45/block/scsi_ioctl.c
24075--- linux-2.6.32.45/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
24076+++ linux-2.6.32.45/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
24077@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
24078 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24079 struct sg_io_hdr *hdr, fmode_t mode)
24080 {
24081- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24082+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24083+ unsigned char *cmdptr;
24084+
24085+ if (rq->cmd != rq->__cmd)
24086+ cmdptr = rq->cmd;
24087+ else
24088+ cmdptr = tmpcmd;
24089+
24090+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24091 return -EFAULT;
24092+
24093+ if (cmdptr != rq->cmd)
24094+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24095+
24096 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24097 return -EPERM;
24098
24099@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
24100 int err;
24101 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24102 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24103+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24104+ unsigned char *cmdptr;
24105
24106 if (!sic)
24107 return -EINVAL;
24108@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
24109 */
24110 err = -EFAULT;
24111 rq->cmd_len = cmdlen;
24112- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24113+
24114+ if (rq->cmd != rq->__cmd)
24115+ cmdptr = rq->cmd;
24116+ else
24117+ cmdptr = tmpcmd;
24118+
24119+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24120 goto error;
24121
24122+ if (rq->cmd != cmdptr)
24123+ memcpy(rq->cmd, cmdptr, cmdlen);
24124+
24125 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24126 goto error;
24127
24128diff -urNp linux-2.6.32.45/crypto/cryptd.c linux-2.6.32.45/crypto/cryptd.c
24129--- linux-2.6.32.45/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24130+++ linux-2.6.32.45/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
24131@@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
24132 struct cryptd_queue *queue;
24133
24134 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
24135- rctx->complete = req->base.complete;
24136+ *(void **)&rctx->complete = req->base.complete;
24137 req->base.complete = complete;
24138
24139 return cryptd_enqueue_request(queue, &req->base);
24140diff -urNp linux-2.6.32.45/crypto/gf128mul.c linux-2.6.32.45/crypto/gf128mul.c
24141--- linux-2.6.32.45/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24142+++ linux-2.6.32.45/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24143@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24144 for (i = 0; i < 7; ++i)
24145 gf128mul_x_lle(&p[i + 1], &p[i]);
24146
24147- memset(r, 0, sizeof(r));
24148+ memset(r, 0, sizeof(*r));
24149 for (i = 0;;) {
24150 u8 ch = ((u8 *)b)[15 - i];
24151
24152@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24153 for (i = 0; i < 7; ++i)
24154 gf128mul_x_bbe(&p[i + 1], &p[i]);
24155
24156- memset(r, 0, sizeof(r));
24157+ memset(r, 0, sizeof(*r));
24158 for (i = 0;;) {
24159 u8 ch = ((u8 *)b)[i];
24160
24161diff -urNp linux-2.6.32.45/crypto/serpent.c linux-2.6.32.45/crypto/serpent.c
24162--- linux-2.6.32.45/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24163+++ linux-2.6.32.45/crypto/serpent.c 2011-08-18 23:59:56.000000000 -0400
24164@@ -21,6 +21,7 @@
24165 #include <asm/byteorder.h>
24166 #include <linux/crypto.h>
24167 #include <linux/types.h>
24168+#include <linux/sched.h>
24169
24170 /* Key is padded to the maximum of 256 bits before round key generation.
24171 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
24172@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_
24173 u32 r0,r1,r2,r3,r4;
24174 int i;
24175
24176+ pax_track_stack();
24177+
24178 /* Copy key, add padding */
24179
24180 for (i = 0; i < keylen; ++i)
24181diff -urNp linux-2.6.32.45/Documentation/dontdiff linux-2.6.32.45/Documentation/dontdiff
24182--- linux-2.6.32.45/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24183+++ linux-2.6.32.45/Documentation/dontdiff 2011-08-21 18:59:02.000000000 -0400
24184@@ -1,13 +1,16 @@
24185 *.a
24186 *.aux
24187 *.bin
24188+*.cis
24189 *.cpio
24190 *.csp
24191+*.dbg
24192 *.dsp
24193 *.dvi
24194 *.elf
24195 *.eps
24196 *.fw
24197+*.gcno
24198 *.gen.S
24199 *.gif
24200 *.grep
24201@@ -38,8 +41,10 @@
24202 *.tab.h
24203 *.tex
24204 *.ver
24205+*.vim
24206 *.xml
24207 *_MODULES
24208+*_reg_safe.h
24209 *_vga16.c
24210 *~
24211 *.9
24212@@ -49,11 +54,16 @@
24213 53c700_d.h
24214 CVS
24215 ChangeSet
24216+GPATH
24217+GRTAGS
24218+GSYMS
24219+GTAGS
24220 Image
24221 Kerntypes
24222 Module.markers
24223 Module.symvers
24224 PENDING
24225+PERF*
24226 SCCS
24227 System.map*
24228 TAGS
24229@@ -76,7 +86,11 @@ btfixupprep
24230 build
24231 bvmlinux
24232 bzImage*
24233+capability_names.h
24234+capflags.c
24235 classlist.h*
24236+clut_vga16.c
24237+common-cmds.h
24238 comp*.log
24239 compile.h*
24240 conf
24241@@ -97,19 +111,21 @@ elfconfig.h*
24242 fixdep
24243 fore200e_mkfirm
24244 fore200e_pca_fw.c*
24245+gate.lds
24246 gconf
24247 gen-devlist
24248 gen_crc32table
24249 gen_init_cpio
24250 genksyms
24251 *_gray256.c
24252+hash
24253 ihex2fw
24254 ikconfig.h*
24255 initramfs_data.cpio
24256+initramfs_data.cpio.bz2
24257 initramfs_data.cpio.gz
24258 initramfs_list
24259 kallsyms
24260-kconfig
24261 keywords.c
24262 ksym.c*
24263 ksym.h*
24264@@ -133,7 +149,9 @@ mkboot
24265 mkbugboot
24266 mkcpustr
24267 mkdep
24268+mkpiggy
24269 mkprep
24270+mkregtable
24271 mktables
24272 mktree
24273 modpost
24274@@ -149,6 +167,7 @@ patches*
24275 pca200e.bin
24276 pca200e_ecd.bin2
24277 piggy.gz
24278+piggy.S
24279 piggyback
24280 pnmtologo
24281 ppc_defs.h*
24282@@ -157,12 +176,15 @@ qconf
24283 raid6altivec*.c
24284 raid6int*.c
24285 raid6tables.c
24286+regdb.c
24287 relocs
24288+rlim_names.h
24289 series
24290 setup
24291 setup.bin
24292 setup.elf
24293 sImage
24294+slabinfo
24295 sm_tbl*
24296 split-include
24297 syscalltab.h
24298@@ -186,14 +208,20 @@ version.h*
24299 vmlinux
24300 vmlinux-*
24301 vmlinux.aout
24302+vmlinux.bin.all
24303+vmlinux.bin.bz2
24304 vmlinux.lds
24305+vmlinux.relocs
24306+voffset.h
24307 vsyscall.lds
24308 vsyscall_32.lds
24309 wanxlfw.inc
24310 uImage
24311 unifdef
24312+utsrelease.h
24313 wakeup.bin
24314 wakeup.elf
24315 wakeup.lds
24316 zImage*
24317 zconf.hash.c
24318+zoffset.h
24319diff -urNp linux-2.6.32.45/Documentation/kernel-parameters.txt linux-2.6.32.45/Documentation/kernel-parameters.txt
24320--- linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24321+++ linux-2.6.32.45/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24322@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24323 the specified number of seconds. This is to be used if
24324 your oopses keep scrolling off the screen.
24325
24326+ pax_nouderef [X86] disables UDEREF. Most likely needed in certain
24327+ virtualization environments that don't cope well with the
24328+ expand-down segment used by UDEREF on X86-32 or the frequent
24329+ page table updates on X86-64.
24330+
24331+ pax_softmode= 0/1 to disable/enable PaX softmode at boot.
24332+
24333 pcbit= [HW,ISDN]
24334
24335 pcd. [PARIDE]
24336diff -urNp linux-2.6.32.45/drivers/acpi/acpi_pad.c linux-2.6.32.45/drivers/acpi/acpi_pad.c
24337--- linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24338+++ linux-2.6.32.45/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24339@@ -30,7 +30,7 @@
24340 #include <acpi/acpi_bus.h>
24341 #include <acpi/acpi_drivers.h>
24342
24343-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24344+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24345 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24346 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24347 static DEFINE_MUTEX(isolated_cpus_lock);
24348diff -urNp linux-2.6.32.45/drivers/acpi/battery.c linux-2.6.32.45/drivers/acpi/battery.c
24349--- linux-2.6.32.45/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24350+++ linux-2.6.32.45/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24351@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24352 }
24353
24354 static struct battery_file {
24355- struct file_operations ops;
24356+ const struct file_operations ops;
24357 mode_t mode;
24358 const char *name;
24359 } acpi_battery_file[] = {
24360diff -urNp linux-2.6.32.45/drivers/acpi/dock.c linux-2.6.32.45/drivers/acpi/dock.c
24361--- linux-2.6.32.45/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24362+++ linux-2.6.32.45/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24363@@ -77,7 +77,7 @@ struct dock_dependent_device {
24364 struct list_head list;
24365 struct list_head hotplug_list;
24366 acpi_handle handle;
24367- struct acpi_dock_ops *ops;
24368+ const struct acpi_dock_ops *ops;
24369 void *context;
24370 };
24371
24372@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24373 * the dock driver after _DCK is executed.
24374 */
24375 int
24376-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24377+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24378 void *context)
24379 {
24380 struct dock_dependent_device *dd;
24381diff -urNp linux-2.6.32.45/drivers/acpi/osl.c linux-2.6.32.45/drivers/acpi/osl.c
24382--- linux-2.6.32.45/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24383+++ linux-2.6.32.45/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24384@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24385 void __iomem *virt_addr;
24386
24387 virt_addr = ioremap(phys_addr, width);
24388+ if (!virt_addr)
24389+ return AE_NO_MEMORY;
24390 if (!value)
24391 value = &dummy;
24392
24393@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24394 void __iomem *virt_addr;
24395
24396 virt_addr = ioremap(phys_addr, width);
24397+ if (!virt_addr)
24398+ return AE_NO_MEMORY;
24399
24400 switch (width) {
24401 case 8:
24402diff -urNp linux-2.6.32.45/drivers/acpi/power_meter.c linux-2.6.32.45/drivers/acpi/power_meter.c
24403--- linux-2.6.32.45/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24404+++ linux-2.6.32.45/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24405@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24406 return res;
24407
24408 temp /= 1000;
24409- if (temp < 0)
24410- return -EINVAL;
24411
24412 mutex_lock(&resource->lock);
24413 resource->trip[attr->index - 7] = temp;
24414diff -urNp linux-2.6.32.45/drivers/acpi/proc.c linux-2.6.32.45/drivers/acpi/proc.c
24415--- linux-2.6.32.45/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24416+++ linux-2.6.32.45/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24417@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24418 size_t count, loff_t * ppos)
24419 {
24420 struct list_head *node, *next;
24421- char strbuf[5];
24422- char str[5] = "";
24423- unsigned int len = count;
24424+ char strbuf[5] = {0};
24425 struct acpi_device *found_dev = NULL;
24426
24427- if (len > 4)
24428- len = 4;
24429- if (len < 0)
24430- return -EFAULT;
24431+ if (count > 4)
24432+ count = 4;
24433
24434- if (copy_from_user(strbuf, buffer, len))
24435+ if (copy_from_user(strbuf, buffer, count))
24436 return -EFAULT;
24437- strbuf[len] = '\0';
24438- sscanf(strbuf, "%s", str);
24439+ strbuf[count] = '\0';
24440
24441 mutex_lock(&acpi_device_lock);
24442 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24443@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24444 if (!dev->wakeup.flags.valid)
24445 continue;
24446
24447- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24448+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24449 dev->wakeup.state.enabled =
24450 dev->wakeup.state.enabled ? 0 : 1;
24451 found_dev = dev;
24452diff -urNp linux-2.6.32.45/drivers/acpi/processor_core.c linux-2.6.32.45/drivers/acpi/processor_core.c
24453--- linux-2.6.32.45/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24454+++ linux-2.6.32.45/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24455@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24456 return 0;
24457 }
24458
24459- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24460+ BUG_ON(pr->id >= nr_cpu_ids);
24461
24462 /*
24463 * Buggy BIOS check
24464diff -urNp linux-2.6.32.45/drivers/acpi/sbshc.c linux-2.6.32.45/drivers/acpi/sbshc.c
24465--- linux-2.6.32.45/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24466+++ linux-2.6.32.45/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24467@@ -17,7 +17,7 @@
24468
24469 #define PREFIX "ACPI: "
24470
24471-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24472+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24473 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24474
24475 struct acpi_smb_hc {
24476diff -urNp linux-2.6.32.45/drivers/acpi/sleep.c linux-2.6.32.45/drivers/acpi/sleep.c
24477--- linux-2.6.32.45/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24478+++ linux-2.6.32.45/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24479@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24480 }
24481 }
24482
24483-static struct platform_suspend_ops acpi_suspend_ops = {
24484+static const struct platform_suspend_ops acpi_suspend_ops = {
24485 .valid = acpi_suspend_state_valid,
24486 .begin = acpi_suspend_begin,
24487 .prepare_late = acpi_pm_prepare,
24488@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24489 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24490 * been requested.
24491 */
24492-static struct platform_suspend_ops acpi_suspend_ops_old = {
24493+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24494 .valid = acpi_suspend_state_valid,
24495 .begin = acpi_suspend_begin_old,
24496 .prepare_late = acpi_pm_disable_gpes,
24497@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24498 acpi_enable_all_runtime_gpes();
24499 }
24500
24501-static struct platform_hibernation_ops acpi_hibernation_ops = {
24502+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24503 .begin = acpi_hibernation_begin,
24504 .end = acpi_pm_end,
24505 .pre_snapshot = acpi_hibernation_pre_snapshot,
24506@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24507 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24508 * been requested.
24509 */
24510-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24511+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24512 .begin = acpi_hibernation_begin_old,
24513 .end = acpi_pm_end,
24514 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24515diff -urNp linux-2.6.32.45/drivers/acpi/video.c linux-2.6.32.45/drivers/acpi/video.c
24516--- linux-2.6.32.45/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24517+++ linux-2.6.32.45/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24518@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24519 vd->brightness->levels[request_level]);
24520 }
24521
24522-static struct backlight_ops acpi_backlight_ops = {
24523+static const struct backlight_ops acpi_backlight_ops = {
24524 .get_brightness = acpi_video_get_brightness,
24525 .update_status = acpi_video_set_brightness,
24526 };
24527diff -urNp linux-2.6.32.45/drivers/ata/ahci.c linux-2.6.32.45/drivers/ata/ahci.c
24528--- linux-2.6.32.45/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24529+++ linux-2.6.32.45/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24530@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24531 .sdev_attrs = ahci_sdev_attrs,
24532 };
24533
24534-static struct ata_port_operations ahci_ops = {
24535+static const struct ata_port_operations ahci_ops = {
24536 .inherits = &sata_pmp_port_ops,
24537
24538 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24539@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24540 .port_stop = ahci_port_stop,
24541 };
24542
24543-static struct ata_port_operations ahci_vt8251_ops = {
24544+static const struct ata_port_operations ahci_vt8251_ops = {
24545 .inherits = &ahci_ops,
24546 .hardreset = ahci_vt8251_hardreset,
24547 };
24548
24549-static struct ata_port_operations ahci_p5wdh_ops = {
24550+static const struct ata_port_operations ahci_p5wdh_ops = {
24551 .inherits = &ahci_ops,
24552 .hardreset = ahci_p5wdh_hardreset,
24553 };
24554
24555-static struct ata_port_operations ahci_sb600_ops = {
24556+static const struct ata_port_operations ahci_sb600_ops = {
24557 .inherits = &ahci_ops,
24558 .softreset = ahci_sb600_softreset,
24559 .pmp_softreset = ahci_sb600_softreset,
24560diff -urNp linux-2.6.32.45/drivers/ata/ata_generic.c linux-2.6.32.45/drivers/ata/ata_generic.c
24561--- linux-2.6.32.45/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24562+++ linux-2.6.32.45/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24563@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24564 ATA_BMDMA_SHT(DRV_NAME),
24565 };
24566
24567-static struct ata_port_operations generic_port_ops = {
24568+static const struct ata_port_operations generic_port_ops = {
24569 .inherits = &ata_bmdma_port_ops,
24570 .cable_detect = ata_cable_unknown,
24571 .set_mode = generic_set_mode,
24572diff -urNp linux-2.6.32.45/drivers/ata/ata_piix.c linux-2.6.32.45/drivers/ata/ata_piix.c
24573--- linux-2.6.32.45/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24574+++ linux-2.6.32.45/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24575@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24576 ATA_BMDMA_SHT(DRV_NAME),
24577 };
24578
24579-static struct ata_port_operations piix_pata_ops = {
24580+static const struct ata_port_operations piix_pata_ops = {
24581 .inherits = &ata_bmdma32_port_ops,
24582 .cable_detect = ata_cable_40wire,
24583 .set_piomode = piix_set_piomode,
24584@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24585 .prereset = piix_pata_prereset,
24586 };
24587
24588-static struct ata_port_operations piix_vmw_ops = {
24589+static const struct ata_port_operations piix_vmw_ops = {
24590 .inherits = &piix_pata_ops,
24591 .bmdma_status = piix_vmw_bmdma_status,
24592 };
24593
24594-static struct ata_port_operations ich_pata_ops = {
24595+static const struct ata_port_operations ich_pata_ops = {
24596 .inherits = &piix_pata_ops,
24597 .cable_detect = ich_pata_cable_detect,
24598 .set_dmamode = ich_set_dmamode,
24599 };
24600
24601-static struct ata_port_operations piix_sata_ops = {
24602+static const struct ata_port_operations piix_sata_ops = {
24603 .inherits = &ata_bmdma_port_ops,
24604 };
24605
24606-static struct ata_port_operations piix_sidpr_sata_ops = {
24607+static const struct ata_port_operations piix_sidpr_sata_ops = {
24608 .inherits = &piix_sata_ops,
24609 .hardreset = sata_std_hardreset,
24610 .scr_read = piix_sidpr_scr_read,
24611diff -urNp linux-2.6.32.45/drivers/ata/libata-acpi.c linux-2.6.32.45/drivers/ata/libata-acpi.c
24612--- linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24613+++ linux-2.6.32.45/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24614@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24615 ata_acpi_uevent(dev->link->ap, dev, event);
24616 }
24617
24618-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24619+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24620 .handler = ata_acpi_dev_notify_dock,
24621 .uevent = ata_acpi_dev_uevent,
24622 };
24623
24624-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24625+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24626 .handler = ata_acpi_ap_notify_dock,
24627 .uevent = ata_acpi_ap_uevent,
24628 };
24629diff -urNp linux-2.6.32.45/drivers/ata/libata-core.c linux-2.6.32.45/drivers/ata/libata-core.c
24630--- linux-2.6.32.45/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24631+++ linux-2.6.32.45/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24632@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24633 struct ata_port *ap;
24634 unsigned int tag;
24635
24636- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24637+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24638 ap = qc->ap;
24639
24640 qc->flags = 0;
24641@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24642 struct ata_port *ap;
24643 struct ata_link *link;
24644
24645- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24646+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24647 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24648 ap = qc->ap;
24649 link = qc->dev->link;
24650@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24651 * LOCKING:
24652 * None.
24653 */
24654-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24655+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24656 {
24657 static DEFINE_SPINLOCK(lock);
24658 const struct ata_port_operations *cur;
24659@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24660 return;
24661
24662 spin_lock(&lock);
24663+ pax_open_kernel();
24664
24665 for (cur = ops->inherits; cur; cur = cur->inherits) {
24666 void **inherit = (void **)cur;
24667@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24668 if (IS_ERR(*pp))
24669 *pp = NULL;
24670
24671- ops->inherits = NULL;
24672+ *(struct ata_port_operations **)&ops->inherits = NULL;
24673
24674+ pax_close_kernel();
24675 spin_unlock(&lock);
24676 }
24677
24678@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24679 */
24680 /* KILLME - the only user left is ipr */
24681 void ata_host_init(struct ata_host *host, struct device *dev,
24682- unsigned long flags, struct ata_port_operations *ops)
24683+ unsigned long flags, const struct ata_port_operations *ops)
24684 {
24685 spin_lock_init(&host->lock);
24686 host->dev = dev;
24687@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24688 /* truly dummy */
24689 }
24690
24691-struct ata_port_operations ata_dummy_port_ops = {
24692+const struct ata_port_operations ata_dummy_port_ops = {
24693 .qc_prep = ata_noop_qc_prep,
24694 .qc_issue = ata_dummy_qc_issue,
24695 .error_handler = ata_dummy_error_handler,
24696diff -urNp linux-2.6.32.45/drivers/ata/libata-eh.c linux-2.6.32.45/drivers/ata/libata-eh.c
24697--- linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:35:28.000000000 -0400
24698+++ linux-2.6.32.45/drivers/ata/libata-eh.c 2011-08-09 18:33:59.000000000 -0400
24699@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24700 {
24701 struct ata_link *link;
24702
24703+ pax_track_stack();
24704+
24705 ata_for_each_link(link, ap, HOST_FIRST)
24706 ata_eh_link_report(link);
24707 }
24708@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24709 */
24710 void ata_std_error_handler(struct ata_port *ap)
24711 {
24712- struct ata_port_operations *ops = ap->ops;
24713+ const struct ata_port_operations *ops = ap->ops;
24714 ata_reset_fn_t hardreset = ops->hardreset;
24715
24716 /* ignore built-in hardreset if SCR access is not available */
24717diff -urNp linux-2.6.32.45/drivers/ata/libata-pmp.c linux-2.6.32.45/drivers/ata/libata-pmp.c
24718--- linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24719+++ linux-2.6.32.45/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24720@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24721 */
24722 static int sata_pmp_eh_recover(struct ata_port *ap)
24723 {
24724- struct ata_port_operations *ops = ap->ops;
24725+ const struct ata_port_operations *ops = ap->ops;
24726 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24727 struct ata_link *pmp_link = &ap->link;
24728 struct ata_device *pmp_dev = pmp_link->device;
24729diff -urNp linux-2.6.32.45/drivers/ata/pata_acpi.c linux-2.6.32.45/drivers/ata/pata_acpi.c
24730--- linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24731+++ linux-2.6.32.45/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24732@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24733 ATA_BMDMA_SHT(DRV_NAME),
24734 };
24735
24736-static struct ata_port_operations pacpi_ops = {
24737+static const struct ata_port_operations pacpi_ops = {
24738 .inherits = &ata_bmdma_port_ops,
24739 .qc_issue = pacpi_qc_issue,
24740 .cable_detect = pacpi_cable_detect,
24741diff -urNp linux-2.6.32.45/drivers/ata/pata_ali.c linux-2.6.32.45/drivers/ata/pata_ali.c
24742--- linux-2.6.32.45/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24743+++ linux-2.6.32.45/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24744@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24745 * Port operations for PIO only ALi
24746 */
24747
24748-static struct ata_port_operations ali_early_port_ops = {
24749+static const struct ata_port_operations ali_early_port_ops = {
24750 .inherits = &ata_sff_port_ops,
24751 .cable_detect = ata_cable_40wire,
24752 .set_piomode = ali_set_piomode,
24753@@ -382,7 +382,7 @@ static const struct ata_port_operations
24754 * Port operations for DMA capable ALi without cable
24755 * detect
24756 */
24757-static struct ata_port_operations ali_20_port_ops = {
24758+static const struct ata_port_operations ali_20_port_ops = {
24759 .inherits = &ali_dma_base_ops,
24760 .cable_detect = ata_cable_40wire,
24761 .mode_filter = ali_20_filter,
24762@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24763 /*
24764 * Port operations for DMA capable ALi with cable detect
24765 */
24766-static struct ata_port_operations ali_c2_port_ops = {
24767+static const struct ata_port_operations ali_c2_port_ops = {
24768 .inherits = &ali_dma_base_ops,
24769 .check_atapi_dma = ali_check_atapi_dma,
24770 .cable_detect = ali_c2_cable_detect,
24771@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24772 /*
24773 * Port operations for DMA capable ALi with cable detect
24774 */
24775-static struct ata_port_operations ali_c4_port_ops = {
24776+static const struct ata_port_operations ali_c4_port_ops = {
24777 .inherits = &ali_dma_base_ops,
24778 .check_atapi_dma = ali_check_atapi_dma,
24779 .cable_detect = ali_c2_cable_detect,
24780@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24781 /*
24782 * Port operations for DMA capable ALi with cable detect and LBA48
24783 */
24784-static struct ata_port_operations ali_c5_port_ops = {
24785+static const struct ata_port_operations ali_c5_port_ops = {
24786 .inherits = &ali_dma_base_ops,
24787 .check_atapi_dma = ali_check_atapi_dma,
24788 .dev_config = ali_warn_atapi_dma,
24789diff -urNp linux-2.6.32.45/drivers/ata/pata_amd.c linux-2.6.32.45/drivers/ata/pata_amd.c
24790--- linux-2.6.32.45/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24791+++ linux-2.6.32.45/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24792@@ -397,28 +397,28 @@ static const struct ata_port_operations
24793 .prereset = amd_pre_reset,
24794 };
24795
24796-static struct ata_port_operations amd33_port_ops = {
24797+static const struct ata_port_operations amd33_port_ops = {
24798 .inherits = &amd_base_port_ops,
24799 .cable_detect = ata_cable_40wire,
24800 .set_piomode = amd33_set_piomode,
24801 .set_dmamode = amd33_set_dmamode,
24802 };
24803
24804-static struct ata_port_operations amd66_port_ops = {
24805+static const struct ata_port_operations amd66_port_ops = {
24806 .inherits = &amd_base_port_ops,
24807 .cable_detect = ata_cable_unknown,
24808 .set_piomode = amd66_set_piomode,
24809 .set_dmamode = amd66_set_dmamode,
24810 };
24811
24812-static struct ata_port_operations amd100_port_ops = {
24813+static const struct ata_port_operations amd100_port_ops = {
24814 .inherits = &amd_base_port_ops,
24815 .cable_detect = ata_cable_unknown,
24816 .set_piomode = amd100_set_piomode,
24817 .set_dmamode = amd100_set_dmamode,
24818 };
24819
24820-static struct ata_port_operations amd133_port_ops = {
24821+static const struct ata_port_operations amd133_port_ops = {
24822 .inherits = &amd_base_port_ops,
24823 .cable_detect = amd_cable_detect,
24824 .set_piomode = amd133_set_piomode,
24825@@ -433,13 +433,13 @@ static const struct ata_port_operations
24826 .host_stop = nv_host_stop,
24827 };
24828
24829-static struct ata_port_operations nv100_port_ops = {
24830+static const struct ata_port_operations nv100_port_ops = {
24831 .inherits = &nv_base_port_ops,
24832 .set_piomode = nv100_set_piomode,
24833 .set_dmamode = nv100_set_dmamode,
24834 };
24835
24836-static struct ata_port_operations nv133_port_ops = {
24837+static const struct ata_port_operations nv133_port_ops = {
24838 .inherits = &nv_base_port_ops,
24839 .set_piomode = nv133_set_piomode,
24840 .set_dmamode = nv133_set_dmamode,
24841diff -urNp linux-2.6.32.45/drivers/ata/pata_artop.c linux-2.6.32.45/drivers/ata/pata_artop.c
24842--- linux-2.6.32.45/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24843+++ linux-2.6.32.45/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24844@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24845 ATA_BMDMA_SHT(DRV_NAME),
24846 };
24847
24848-static struct ata_port_operations artop6210_ops = {
24849+static const struct ata_port_operations artop6210_ops = {
24850 .inherits = &ata_bmdma_port_ops,
24851 .cable_detect = ata_cable_40wire,
24852 .set_piomode = artop6210_set_piomode,
24853@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24854 .qc_defer = artop6210_qc_defer,
24855 };
24856
24857-static struct ata_port_operations artop6260_ops = {
24858+static const struct ata_port_operations artop6260_ops = {
24859 .inherits = &ata_bmdma_port_ops,
24860 .cable_detect = artop6260_cable_detect,
24861 .set_piomode = artop6260_set_piomode,
24862diff -urNp linux-2.6.32.45/drivers/ata/pata_at32.c linux-2.6.32.45/drivers/ata/pata_at32.c
24863--- linux-2.6.32.45/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24864+++ linux-2.6.32.45/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24865@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24866 ATA_PIO_SHT(DRV_NAME),
24867 };
24868
24869-static struct ata_port_operations at32_port_ops = {
24870+static const struct ata_port_operations at32_port_ops = {
24871 .inherits = &ata_sff_port_ops,
24872 .cable_detect = ata_cable_40wire,
24873 .set_piomode = pata_at32_set_piomode,
24874diff -urNp linux-2.6.32.45/drivers/ata/pata_at91.c linux-2.6.32.45/drivers/ata/pata_at91.c
24875--- linux-2.6.32.45/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24876+++ linux-2.6.32.45/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24877@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24878 ATA_PIO_SHT(DRV_NAME),
24879 };
24880
24881-static struct ata_port_operations pata_at91_port_ops = {
24882+static const struct ata_port_operations pata_at91_port_ops = {
24883 .inherits = &ata_sff_port_ops,
24884
24885 .sff_data_xfer = pata_at91_data_xfer_noirq,
24886diff -urNp linux-2.6.32.45/drivers/ata/pata_atiixp.c linux-2.6.32.45/drivers/ata/pata_atiixp.c
24887--- linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24888+++ linux-2.6.32.45/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24889@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24890 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24891 };
24892
24893-static struct ata_port_operations atiixp_port_ops = {
24894+static const struct ata_port_operations atiixp_port_ops = {
24895 .inherits = &ata_bmdma_port_ops,
24896
24897 .qc_prep = ata_sff_dumb_qc_prep,
24898diff -urNp linux-2.6.32.45/drivers/ata/pata_atp867x.c linux-2.6.32.45/drivers/ata/pata_atp867x.c
24899--- linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24900+++ linux-2.6.32.45/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24901@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24902 ATA_BMDMA_SHT(DRV_NAME),
24903 };
24904
24905-static struct ata_port_operations atp867x_ops = {
24906+static const struct ata_port_operations atp867x_ops = {
24907 .inherits = &ata_bmdma_port_ops,
24908 .cable_detect = atp867x_cable_detect,
24909 .set_piomode = atp867x_set_piomode,
24910diff -urNp linux-2.6.32.45/drivers/ata/pata_bf54x.c linux-2.6.32.45/drivers/ata/pata_bf54x.c
24911--- linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24912+++ linux-2.6.32.45/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24913@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24914 .dma_boundary = ATA_DMA_BOUNDARY,
24915 };
24916
24917-static struct ata_port_operations bfin_pata_ops = {
24918+static const struct ata_port_operations bfin_pata_ops = {
24919 .inherits = &ata_sff_port_ops,
24920
24921 .set_piomode = bfin_set_piomode,
24922diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd640.c linux-2.6.32.45/drivers/ata/pata_cmd640.c
24923--- linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24924+++ linux-2.6.32.45/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24925@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24926 ATA_BMDMA_SHT(DRV_NAME),
24927 };
24928
24929-static struct ata_port_operations cmd640_port_ops = {
24930+static const struct ata_port_operations cmd640_port_ops = {
24931 .inherits = &ata_bmdma_port_ops,
24932 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24933 .sff_data_xfer = ata_sff_data_xfer_noirq,
24934diff -urNp linux-2.6.32.45/drivers/ata/pata_cmd64x.c linux-2.6.32.45/drivers/ata/pata_cmd64x.c
24935--- linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24936+++ linux-2.6.32.45/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24937@@ -271,18 +271,18 @@ static const struct ata_port_operations
24938 .set_dmamode = cmd64x_set_dmamode,
24939 };
24940
24941-static struct ata_port_operations cmd64x_port_ops = {
24942+static const struct ata_port_operations cmd64x_port_ops = {
24943 .inherits = &cmd64x_base_ops,
24944 .cable_detect = ata_cable_40wire,
24945 };
24946
24947-static struct ata_port_operations cmd646r1_port_ops = {
24948+static const struct ata_port_operations cmd646r1_port_ops = {
24949 .inherits = &cmd64x_base_ops,
24950 .bmdma_stop = cmd646r1_bmdma_stop,
24951 .cable_detect = ata_cable_40wire,
24952 };
24953
24954-static struct ata_port_operations cmd648_port_ops = {
24955+static const struct ata_port_operations cmd648_port_ops = {
24956 .inherits = &cmd64x_base_ops,
24957 .bmdma_stop = cmd648_bmdma_stop,
24958 .cable_detect = cmd648_cable_detect,
24959diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5520.c linux-2.6.32.45/drivers/ata/pata_cs5520.c
24960--- linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24961+++ linux-2.6.32.45/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24962@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24963 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24964 };
24965
24966-static struct ata_port_operations cs5520_port_ops = {
24967+static const struct ata_port_operations cs5520_port_ops = {
24968 .inherits = &ata_bmdma_port_ops,
24969 .qc_prep = ata_sff_dumb_qc_prep,
24970 .cable_detect = ata_cable_40wire,
24971diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5530.c linux-2.6.32.45/drivers/ata/pata_cs5530.c
24972--- linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24973+++ linux-2.6.32.45/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24974@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24975 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24976 };
24977
24978-static struct ata_port_operations cs5530_port_ops = {
24979+static const struct ata_port_operations cs5530_port_ops = {
24980 .inherits = &ata_bmdma_port_ops,
24981
24982 .qc_prep = ata_sff_dumb_qc_prep,
24983diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5535.c linux-2.6.32.45/drivers/ata/pata_cs5535.c
24984--- linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24985+++ linux-2.6.32.45/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24986@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24987 ATA_BMDMA_SHT(DRV_NAME),
24988 };
24989
24990-static struct ata_port_operations cs5535_port_ops = {
24991+static const struct ata_port_operations cs5535_port_ops = {
24992 .inherits = &ata_bmdma_port_ops,
24993 .cable_detect = cs5535_cable_detect,
24994 .set_piomode = cs5535_set_piomode,
24995diff -urNp linux-2.6.32.45/drivers/ata/pata_cs5536.c linux-2.6.32.45/drivers/ata/pata_cs5536.c
24996--- linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24997+++ linux-2.6.32.45/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24998@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24999 ATA_BMDMA_SHT(DRV_NAME),
25000 };
25001
25002-static struct ata_port_operations cs5536_port_ops = {
25003+static const struct ata_port_operations cs5536_port_ops = {
25004 .inherits = &ata_bmdma_port_ops,
25005 .cable_detect = cs5536_cable_detect,
25006 .set_piomode = cs5536_set_piomode,
25007diff -urNp linux-2.6.32.45/drivers/ata/pata_cypress.c linux-2.6.32.45/drivers/ata/pata_cypress.c
25008--- linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
25009+++ linux-2.6.32.45/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
25010@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
25011 ATA_BMDMA_SHT(DRV_NAME),
25012 };
25013
25014-static struct ata_port_operations cy82c693_port_ops = {
25015+static const struct ata_port_operations cy82c693_port_ops = {
25016 .inherits = &ata_bmdma_port_ops,
25017 .cable_detect = ata_cable_40wire,
25018 .set_piomode = cy82c693_set_piomode,
25019diff -urNp linux-2.6.32.45/drivers/ata/pata_efar.c linux-2.6.32.45/drivers/ata/pata_efar.c
25020--- linux-2.6.32.45/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
25021+++ linux-2.6.32.45/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
25022@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
25023 ATA_BMDMA_SHT(DRV_NAME),
25024 };
25025
25026-static struct ata_port_operations efar_ops = {
25027+static const struct ata_port_operations efar_ops = {
25028 .inherits = &ata_bmdma_port_ops,
25029 .cable_detect = efar_cable_detect,
25030 .set_piomode = efar_set_piomode,
25031diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt366.c linux-2.6.32.45/drivers/ata/pata_hpt366.c
25032--- linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25033+++ linux-2.6.32.45/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25034@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25035 * Configuration for HPT366/68
25036 */
25037
25038-static struct ata_port_operations hpt366_port_ops = {
25039+static const struct ata_port_operations hpt366_port_ops = {
25040 .inherits = &ata_bmdma_port_ops,
25041 .cable_detect = hpt36x_cable_detect,
25042 .mode_filter = hpt366_filter,
25043diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt37x.c linux-2.6.32.45/drivers/ata/pata_hpt37x.c
25044--- linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25045+++ linux-2.6.32.45/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25046@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25047 * Configuration for HPT370
25048 */
25049
25050-static struct ata_port_operations hpt370_port_ops = {
25051+static const struct ata_port_operations hpt370_port_ops = {
25052 .inherits = &ata_bmdma_port_ops,
25053
25054 .bmdma_stop = hpt370_bmdma_stop,
25055@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25056 * Configuration for HPT370A. Close to 370 but less filters
25057 */
25058
25059-static struct ata_port_operations hpt370a_port_ops = {
25060+static const struct ata_port_operations hpt370a_port_ops = {
25061 .inherits = &hpt370_port_ops,
25062 .mode_filter = hpt370a_filter,
25063 };
25064@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25065 * and DMA mode setting functionality.
25066 */
25067
25068-static struct ata_port_operations hpt372_port_ops = {
25069+static const struct ata_port_operations hpt372_port_ops = {
25070 .inherits = &ata_bmdma_port_ops,
25071
25072 .bmdma_stop = hpt37x_bmdma_stop,
25073@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25074 * but we have a different cable detection procedure for function 1.
25075 */
25076
25077-static struct ata_port_operations hpt374_fn1_port_ops = {
25078+static const struct ata_port_operations hpt374_fn1_port_ops = {
25079 .inherits = &hpt372_port_ops,
25080 .prereset = hpt374_fn1_pre_reset,
25081 };
25082diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c
25083--- linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25084+++ linux-2.6.32.45/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25085@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25086 * Configuration for HPT3x2n.
25087 */
25088
25089-static struct ata_port_operations hpt3x2n_port_ops = {
25090+static const struct ata_port_operations hpt3x2n_port_ops = {
25091 .inherits = &ata_bmdma_port_ops,
25092
25093 .bmdma_stop = hpt3x2n_bmdma_stop,
25094diff -urNp linux-2.6.32.45/drivers/ata/pata_hpt3x3.c linux-2.6.32.45/drivers/ata/pata_hpt3x3.c
25095--- linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25096+++ linux-2.6.32.45/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25097@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25098 ATA_BMDMA_SHT(DRV_NAME),
25099 };
25100
25101-static struct ata_port_operations hpt3x3_port_ops = {
25102+static const struct ata_port_operations hpt3x3_port_ops = {
25103 .inherits = &ata_bmdma_port_ops,
25104 .cable_detect = ata_cable_40wire,
25105 .set_piomode = hpt3x3_set_piomode,
25106diff -urNp linux-2.6.32.45/drivers/ata/pata_icside.c linux-2.6.32.45/drivers/ata/pata_icside.c
25107--- linux-2.6.32.45/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25108+++ linux-2.6.32.45/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25109@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25110 }
25111 }
25112
25113-static struct ata_port_operations pata_icside_port_ops = {
25114+static const struct ata_port_operations pata_icside_port_ops = {
25115 .inherits = &ata_sff_port_ops,
25116 /* no need to build any PRD tables for DMA */
25117 .qc_prep = ata_noop_qc_prep,
25118diff -urNp linux-2.6.32.45/drivers/ata/pata_isapnp.c linux-2.6.32.45/drivers/ata/pata_isapnp.c
25119--- linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25120+++ linux-2.6.32.45/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25121@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25122 ATA_PIO_SHT(DRV_NAME),
25123 };
25124
25125-static struct ata_port_operations isapnp_port_ops = {
25126+static const struct ata_port_operations isapnp_port_ops = {
25127 .inherits = &ata_sff_port_ops,
25128 .cable_detect = ata_cable_40wire,
25129 };
25130
25131-static struct ata_port_operations isapnp_noalt_port_ops = {
25132+static const struct ata_port_operations isapnp_noalt_port_ops = {
25133 .inherits = &ata_sff_port_ops,
25134 .cable_detect = ata_cable_40wire,
25135 /* No altstatus so we don't want to use the lost interrupt poll */
25136diff -urNp linux-2.6.32.45/drivers/ata/pata_it8213.c linux-2.6.32.45/drivers/ata/pata_it8213.c
25137--- linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25138+++ linux-2.6.32.45/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25139@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25140 };
25141
25142
25143-static struct ata_port_operations it8213_ops = {
25144+static const struct ata_port_operations it8213_ops = {
25145 .inherits = &ata_bmdma_port_ops,
25146 .cable_detect = it8213_cable_detect,
25147 .set_piomode = it8213_set_piomode,
25148diff -urNp linux-2.6.32.45/drivers/ata/pata_it821x.c linux-2.6.32.45/drivers/ata/pata_it821x.c
25149--- linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25150+++ linux-2.6.32.45/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25151@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25152 ATA_BMDMA_SHT(DRV_NAME),
25153 };
25154
25155-static struct ata_port_operations it821x_smart_port_ops = {
25156+static const struct ata_port_operations it821x_smart_port_ops = {
25157 .inherits = &ata_bmdma_port_ops,
25158
25159 .check_atapi_dma= it821x_check_atapi_dma,
25160@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25161 .port_start = it821x_port_start,
25162 };
25163
25164-static struct ata_port_operations it821x_passthru_port_ops = {
25165+static const struct ata_port_operations it821x_passthru_port_ops = {
25166 .inherits = &ata_bmdma_port_ops,
25167
25168 .check_atapi_dma= it821x_check_atapi_dma,
25169@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25170 .port_start = it821x_port_start,
25171 };
25172
25173-static struct ata_port_operations it821x_rdc_port_ops = {
25174+static const struct ata_port_operations it821x_rdc_port_ops = {
25175 .inherits = &ata_bmdma_port_ops,
25176
25177 .check_atapi_dma= it821x_check_atapi_dma,
25178diff -urNp linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c
25179--- linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25180+++ linux-2.6.32.45/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25181@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25182 ATA_PIO_SHT(DRV_NAME),
25183 };
25184
25185-static struct ata_port_operations ixp4xx_port_ops = {
25186+static const struct ata_port_operations ixp4xx_port_ops = {
25187 .inherits = &ata_sff_port_ops,
25188 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25189 .cable_detect = ata_cable_40wire,
25190diff -urNp linux-2.6.32.45/drivers/ata/pata_jmicron.c linux-2.6.32.45/drivers/ata/pata_jmicron.c
25191--- linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25192+++ linux-2.6.32.45/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25193@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25194 ATA_BMDMA_SHT(DRV_NAME),
25195 };
25196
25197-static struct ata_port_operations jmicron_ops = {
25198+static const struct ata_port_operations jmicron_ops = {
25199 .inherits = &ata_bmdma_port_ops,
25200 .prereset = jmicron_pre_reset,
25201 };
25202diff -urNp linux-2.6.32.45/drivers/ata/pata_legacy.c linux-2.6.32.45/drivers/ata/pata_legacy.c
25203--- linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25204+++ linux-2.6.32.45/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25205@@ -106,7 +106,7 @@ struct legacy_probe {
25206
25207 struct legacy_controller {
25208 const char *name;
25209- struct ata_port_operations *ops;
25210+ const struct ata_port_operations *ops;
25211 unsigned int pio_mask;
25212 unsigned int flags;
25213 unsigned int pflags;
25214@@ -223,12 +223,12 @@ static const struct ata_port_operations
25215 * pio_mask as well.
25216 */
25217
25218-static struct ata_port_operations simple_port_ops = {
25219+static const struct ata_port_operations simple_port_ops = {
25220 .inherits = &legacy_base_port_ops,
25221 .sff_data_xfer = ata_sff_data_xfer_noirq,
25222 };
25223
25224-static struct ata_port_operations legacy_port_ops = {
25225+static const struct ata_port_operations legacy_port_ops = {
25226 .inherits = &legacy_base_port_ops,
25227 .sff_data_xfer = ata_sff_data_xfer_noirq,
25228 .set_mode = legacy_set_mode,
25229@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25230 return buflen;
25231 }
25232
25233-static struct ata_port_operations pdc20230_port_ops = {
25234+static const struct ata_port_operations pdc20230_port_ops = {
25235 .inherits = &legacy_base_port_ops,
25236 .set_piomode = pdc20230_set_piomode,
25237 .sff_data_xfer = pdc_data_xfer_vlb,
25238@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25239 ioread8(ap->ioaddr.status_addr);
25240 }
25241
25242-static struct ata_port_operations ht6560a_port_ops = {
25243+static const struct ata_port_operations ht6560a_port_ops = {
25244 .inherits = &legacy_base_port_ops,
25245 .set_piomode = ht6560a_set_piomode,
25246 };
25247@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25248 ioread8(ap->ioaddr.status_addr);
25249 }
25250
25251-static struct ata_port_operations ht6560b_port_ops = {
25252+static const struct ata_port_operations ht6560b_port_ops = {
25253 .inherits = &legacy_base_port_ops,
25254 .set_piomode = ht6560b_set_piomode,
25255 };
25256@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25257 }
25258
25259
25260-static struct ata_port_operations opti82c611a_port_ops = {
25261+static const struct ata_port_operations opti82c611a_port_ops = {
25262 .inherits = &legacy_base_port_ops,
25263 .set_piomode = opti82c611a_set_piomode,
25264 };
25265@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25266 return ata_sff_qc_issue(qc);
25267 }
25268
25269-static struct ata_port_operations opti82c46x_port_ops = {
25270+static const struct ata_port_operations opti82c46x_port_ops = {
25271 .inherits = &legacy_base_port_ops,
25272 .set_piomode = opti82c46x_set_piomode,
25273 .qc_issue = opti82c46x_qc_issue,
25274@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25275 return 0;
25276 }
25277
25278-static struct ata_port_operations qdi6500_port_ops = {
25279+static const struct ata_port_operations qdi6500_port_ops = {
25280 .inherits = &legacy_base_port_ops,
25281 .set_piomode = qdi6500_set_piomode,
25282 .qc_issue = qdi_qc_issue,
25283 .sff_data_xfer = vlb32_data_xfer,
25284 };
25285
25286-static struct ata_port_operations qdi6580_port_ops = {
25287+static const struct ata_port_operations qdi6580_port_ops = {
25288 .inherits = &legacy_base_port_ops,
25289 .set_piomode = qdi6580_set_piomode,
25290 .sff_data_xfer = vlb32_data_xfer,
25291 };
25292
25293-static struct ata_port_operations qdi6580dp_port_ops = {
25294+static const struct ata_port_operations qdi6580dp_port_ops = {
25295 .inherits = &legacy_base_port_ops,
25296 .set_piomode = qdi6580dp_set_piomode,
25297 .sff_data_xfer = vlb32_data_xfer,
25298@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25299 return 0;
25300 }
25301
25302-static struct ata_port_operations winbond_port_ops = {
25303+static const struct ata_port_operations winbond_port_ops = {
25304 .inherits = &legacy_base_port_ops,
25305 .set_piomode = winbond_set_piomode,
25306 .sff_data_xfer = vlb32_data_xfer,
25307@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25308 int pio_modes = controller->pio_mask;
25309 unsigned long io = probe->port;
25310 u32 mask = (1 << probe->slot);
25311- struct ata_port_operations *ops = controller->ops;
25312+ const struct ata_port_operations *ops = controller->ops;
25313 struct legacy_data *ld = &legacy_data[probe->slot];
25314 struct ata_host *host = NULL;
25315 struct ata_port *ap;
25316diff -urNp linux-2.6.32.45/drivers/ata/pata_marvell.c linux-2.6.32.45/drivers/ata/pata_marvell.c
25317--- linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25318+++ linux-2.6.32.45/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25319@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25320 ATA_BMDMA_SHT(DRV_NAME),
25321 };
25322
25323-static struct ata_port_operations marvell_ops = {
25324+static const struct ata_port_operations marvell_ops = {
25325 .inherits = &ata_bmdma_port_ops,
25326 .cable_detect = marvell_cable_detect,
25327 .prereset = marvell_pre_reset,
25328diff -urNp linux-2.6.32.45/drivers/ata/pata_mpc52xx.c linux-2.6.32.45/drivers/ata/pata_mpc52xx.c
25329--- linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25330+++ linux-2.6.32.45/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25331@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25332 ATA_PIO_SHT(DRV_NAME),
25333 };
25334
25335-static struct ata_port_operations mpc52xx_ata_port_ops = {
25336+static const struct ata_port_operations mpc52xx_ata_port_ops = {
25337 .inherits = &ata_bmdma_port_ops,
25338 .sff_dev_select = mpc52xx_ata_dev_select,
25339 .set_piomode = mpc52xx_ata_set_piomode,
25340diff -urNp linux-2.6.32.45/drivers/ata/pata_mpiix.c linux-2.6.32.45/drivers/ata/pata_mpiix.c
25341--- linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25342+++ linux-2.6.32.45/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25343@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25344 ATA_PIO_SHT(DRV_NAME),
25345 };
25346
25347-static struct ata_port_operations mpiix_port_ops = {
25348+static const struct ata_port_operations mpiix_port_ops = {
25349 .inherits = &ata_sff_port_ops,
25350 .qc_issue = mpiix_qc_issue,
25351 .cable_detect = ata_cable_40wire,
25352diff -urNp linux-2.6.32.45/drivers/ata/pata_netcell.c linux-2.6.32.45/drivers/ata/pata_netcell.c
25353--- linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25354+++ linux-2.6.32.45/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25355@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25356 ATA_BMDMA_SHT(DRV_NAME),
25357 };
25358
25359-static struct ata_port_operations netcell_ops = {
25360+static const struct ata_port_operations netcell_ops = {
25361 .inherits = &ata_bmdma_port_ops,
25362 .cable_detect = ata_cable_80wire,
25363 .read_id = netcell_read_id,
25364diff -urNp linux-2.6.32.45/drivers/ata/pata_ninja32.c linux-2.6.32.45/drivers/ata/pata_ninja32.c
25365--- linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25366+++ linux-2.6.32.45/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25367@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25368 ATA_BMDMA_SHT(DRV_NAME),
25369 };
25370
25371-static struct ata_port_operations ninja32_port_ops = {
25372+static const struct ata_port_operations ninja32_port_ops = {
25373 .inherits = &ata_bmdma_port_ops,
25374 .sff_dev_select = ninja32_dev_select,
25375 .cable_detect = ata_cable_40wire,
25376diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87410.c linux-2.6.32.45/drivers/ata/pata_ns87410.c
25377--- linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25378+++ linux-2.6.32.45/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25379@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25380 ATA_PIO_SHT(DRV_NAME),
25381 };
25382
25383-static struct ata_port_operations ns87410_port_ops = {
25384+static const struct ata_port_operations ns87410_port_ops = {
25385 .inherits = &ata_sff_port_ops,
25386 .qc_issue = ns87410_qc_issue,
25387 .cable_detect = ata_cable_40wire,
25388diff -urNp linux-2.6.32.45/drivers/ata/pata_ns87415.c linux-2.6.32.45/drivers/ata/pata_ns87415.c
25389--- linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25390+++ linux-2.6.32.45/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25391@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25392 }
25393 #endif /* 87560 SuperIO Support */
25394
25395-static struct ata_port_operations ns87415_pata_ops = {
25396+static const struct ata_port_operations ns87415_pata_ops = {
25397 .inherits = &ata_bmdma_port_ops,
25398
25399 .check_atapi_dma = ns87415_check_atapi_dma,
25400@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25401 };
25402
25403 #if defined(CONFIG_SUPERIO)
25404-static struct ata_port_operations ns87560_pata_ops = {
25405+static const struct ata_port_operations ns87560_pata_ops = {
25406 .inherits = &ns87415_pata_ops,
25407 .sff_tf_read = ns87560_tf_read,
25408 .sff_check_status = ns87560_check_status,
25409diff -urNp linux-2.6.32.45/drivers/ata/pata_octeon_cf.c linux-2.6.32.45/drivers/ata/pata_octeon_cf.c
25410--- linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25411+++ linux-2.6.32.45/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25412@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25413 return 0;
25414 }
25415
25416+/* cannot be const */
25417 static struct ata_port_operations octeon_cf_ops = {
25418 .inherits = &ata_sff_port_ops,
25419 .check_atapi_dma = octeon_cf_check_atapi_dma,
25420diff -urNp linux-2.6.32.45/drivers/ata/pata_oldpiix.c linux-2.6.32.45/drivers/ata/pata_oldpiix.c
25421--- linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25422+++ linux-2.6.32.45/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25423@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25424 ATA_BMDMA_SHT(DRV_NAME),
25425 };
25426
25427-static struct ata_port_operations oldpiix_pata_ops = {
25428+static const struct ata_port_operations oldpiix_pata_ops = {
25429 .inherits = &ata_bmdma_port_ops,
25430 .qc_issue = oldpiix_qc_issue,
25431 .cable_detect = ata_cable_40wire,
25432diff -urNp linux-2.6.32.45/drivers/ata/pata_opti.c linux-2.6.32.45/drivers/ata/pata_opti.c
25433--- linux-2.6.32.45/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25434+++ linux-2.6.32.45/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25435@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25436 ATA_PIO_SHT(DRV_NAME),
25437 };
25438
25439-static struct ata_port_operations opti_port_ops = {
25440+static const struct ata_port_operations opti_port_ops = {
25441 .inherits = &ata_sff_port_ops,
25442 .cable_detect = ata_cable_40wire,
25443 .set_piomode = opti_set_piomode,
25444diff -urNp linux-2.6.32.45/drivers/ata/pata_optidma.c linux-2.6.32.45/drivers/ata/pata_optidma.c
25445--- linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25446+++ linux-2.6.32.45/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25447@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25448 ATA_BMDMA_SHT(DRV_NAME),
25449 };
25450
25451-static struct ata_port_operations optidma_port_ops = {
25452+static const struct ata_port_operations optidma_port_ops = {
25453 .inherits = &ata_bmdma_port_ops,
25454 .cable_detect = ata_cable_40wire,
25455 .set_piomode = optidma_set_pio_mode,
25456@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25457 .prereset = optidma_pre_reset,
25458 };
25459
25460-static struct ata_port_operations optiplus_port_ops = {
25461+static const struct ata_port_operations optiplus_port_ops = {
25462 .inherits = &optidma_port_ops,
25463 .set_piomode = optiplus_set_pio_mode,
25464 .set_dmamode = optiplus_set_dma_mode,
25465diff -urNp linux-2.6.32.45/drivers/ata/pata_palmld.c linux-2.6.32.45/drivers/ata/pata_palmld.c
25466--- linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25467+++ linux-2.6.32.45/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25468@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25469 ATA_PIO_SHT(DRV_NAME),
25470 };
25471
25472-static struct ata_port_operations palmld_port_ops = {
25473+static const struct ata_port_operations palmld_port_ops = {
25474 .inherits = &ata_sff_port_ops,
25475 .sff_data_xfer = ata_sff_data_xfer_noirq,
25476 .cable_detect = ata_cable_40wire,
25477diff -urNp linux-2.6.32.45/drivers/ata/pata_pcmcia.c linux-2.6.32.45/drivers/ata/pata_pcmcia.c
25478--- linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25479+++ linux-2.6.32.45/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25480@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25481 ATA_PIO_SHT(DRV_NAME),
25482 };
25483
25484-static struct ata_port_operations pcmcia_port_ops = {
25485+static const struct ata_port_operations pcmcia_port_ops = {
25486 .inherits = &ata_sff_port_ops,
25487 .sff_data_xfer = ata_sff_data_xfer_noirq,
25488 .cable_detect = ata_cable_40wire,
25489 .set_mode = pcmcia_set_mode,
25490 };
25491
25492-static struct ata_port_operations pcmcia_8bit_port_ops = {
25493+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25494 .inherits = &ata_sff_port_ops,
25495 .sff_data_xfer = ata_data_xfer_8bit,
25496 .cable_detect = ata_cable_40wire,
25497@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25498 unsigned long io_base, ctl_base;
25499 void __iomem *io_addr, *ctl_addr;
25500 int n_ports = 1;
25501- struct ata_port_operations *ops = &pcmcia_port_ops;
25502+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25503
25504 info = kzalloc(sizeof(*info), GFP_KERNEL);
25505 if (info == NULL)
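[Editor's note] Once the operations tables themselves are const, any code that selects between them through a pointer must const-qualify that pointer as well, which is what the pcmcia_init_one() change above does. A small sketch of the C rule involved, with hypothetical sketch_* names:

    static const struct ata_port_operations sketch_a = { .inherits = &ata_sff_port_ops };
    static const struct ata_port_operations sketch_b = { .inherits = &ata_sff_port_ops };

    static const struct ata_port_operations *sketch_select(int use_b)
    {
            /* the local pointer must itself be const-qualified, as in the hunk above;
             * "struct ata_port_operations *bad = &sketch_a;" would warn that the
             * initialization discards the 'const' qualifier */
            const struct ata_port_operations *ops = &sketch_a;

            if (use_b)
                    ops = &sketch_b;
            return ops;
    }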
25506diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc2027x.c linux-2.6.32.45/drivers/ata/pata_pdc2027x.c
25507--- linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25508+++ linux-2.6.32.45/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25509@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25510 ATA_BMDMA_SHT(DRV_NAME),
25511 };
25512
25513-static struct ata_port_operations pdc2027x_pata100_ops = {
25514+static const struct ata_port_operations pdc2027x_pata100_ops = {
25515 .inherits = &ata_bmdma_port_ops,
25516 .check_atapi_dma = pdc2027x_check_atapi_dma,
25517 .cable_detect = pdc2027x_cable_detect,
25518 .prereset = pdc2027x_prereset,
25519 };
25520
25521-static struct ata_port_operations pdc2027x_pata133_ops = {
25522+static const struct ata_port_operations pdc2027x_pata133_ops = {
25523 .inherits = &pdc2027x_pata100_ops,
25524 .mode_filter = pdc2027x_mode_filter,
25525 .set_piomode = pdc2027x_set_piomode,
25526diff -urNp linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c
25527--- linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25528+++ linux-2.6.32.45/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25529@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25530 ATA_BMDMA_SHT(DRV_NAME),
25531 };
25532
25533-static struct ata_port_operations pdc2024x_port_ops = {
25534+static const struct ata_port_operations pdc2024x_port_ops = {
25535 .inherits = &ata_bmdma_port_ops,
25536
25537 .cable_detect = ata_cable_40wire,
25538@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25539 .sff_exec_command = pdc202xx_exec_command,
25540 };
25541
25542-static struct ata_port_operations pdc2026x_port_ops = {
25543+static const struct ata_port_operations pdc2026x_port_ops = {
25544 .inherits = &pdc2024x_port_ops,
25545
25546 .check_atapi_dma = pdc2026x_check_atapi_dma,
25547diff -urNp linux-2.6.32.45/drivers/ata/pata_platform.c linux-2.6.32.45/drivers/ata/pata_platform.c
25548--- linux-2.6.32.45/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25549+++ linux-2.6.32.45/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25550@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25551 ATA_PIO_SHT(DRV_NAME),
25552 };
25553
25554-static struct ata_port_operations pata_platform_port_ops = {
25555+static const struct ata_port_operations pata_platform_port_ops = {
25556 .inherits = &ata_sff_port_ops,
25557 .sff_data_xfer = ata_sff_data_xfer_noirq,
25558 .cable_detect = ata_cable_unknown,
25559diff -urNp linux-2.6.32.45/drivers/ata/pata_qdi.c linux-2.6.32.45/drivers/ata/pata_qdi.c
25560--- linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25561+++ linux-2.6.32.45/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25562@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25563 ATA_PIO_SHT(DRV_NAME),
25564 };
25565
25566-static struct ata_port_operations qdi6500_port_ops = {
25567+static const struct ata_port_operations qdi6500_port_ops = {
25568 .inherits = &ata_sff_port_ops,
25569 .qc_issue = qdi_qc_issue,
25570 .sff_data_xfer = qdi_data_xfer,
25571@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25572 .set_piomode = qdi6500_set_piomode,
25573 };
25574
25575-static struct ata_port_operations qdi6580_port_ops = {
25576+static const struct ata_port_operations qdi6580_port_ops = {
25577 .inherits = &qdi6500_port_ops,
25578 .set_piomode = qdi6580_set_piomode,
25579 };
25580diff -urNp linux-2.6.32.45/drivers/ata/pata_radisys.c linux-2.6.32.45/drivers/ata/pata_radisys.c
25581--- linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25582+++ linux-2.6.32.45/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25583@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25584 ATA_BMDMA_SHT(DRV_NAME),
25585 };
25586
25587-static struct ata_port_operations radisys_pata_ops = {
25588+static const struct ata_port_operations radisys_pata_ops = {
25589 .inherits = &ata_bmdma_port_ops,
25590 .qc_issue = radisys_qc_issue,
25591 .cable_detect = ata_cable_unknown,
25592diff -urNp linux-2.6.32.45/drivers/ata/pata_rb532_cf.c linux-2.6.32.45/drivers/ata/pata_rb532_cf.c
25593--- linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25594+++ linux-2.6.32.45/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25595@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25596 return IRQ_HANDLED;
25597 }
25598
25599-static struct ata_port_operations rb532_pata_port_ops = {
25600+static const struct ata_port_operations rb532_pata_port_ops = {
25601 .inherits = &ata_sff_port_ops,
25602 .sff_data_xfer = ata_sff_data_xfer32,
25603 };
25604diff -urNp linux-2.6.32.45/drivers/ata/pata_rdc.c linux-2.6.32.45/drivers/ata/pata_rdc.c
25605--- linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25606+++ linux-2.6.32.45/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25607@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25608 pci_write_config_byte(dev, 0x48, udma_enable);
25609 }
25610
25611-static struct ata_port_operations rdc_pata_ops = {
25612+static const struct ata_port_operations rdc_pata_ops = {
25613 .inherits = &ata_bmdma32_port_ops,
25614 .cable_detect = rdc_pata_cable_detect,
25615 .set_piomode = rdc_set_piomode,
25616diff -urNp linux-2.6.32.45/drivers/ata/pata_rz1000.c linux-2.6.32.45/drivers/ata/pata_rz1000.c
25617--- linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25618+++ linux-2.6.32.45/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25619@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25620 ATA_PIO_SHT(DRV_NAME),
25621 };
25622
25623-static struct ata_port_operations rz1000_port_ops = {
25624+static const struct ata_port_operations rz1000_port_ops = {
25625 .inherits = &ata_sff_port_ops,
25626 .cable_detect = ata_cable_40wire,
25627 .set_mode = rz1000_set_mode,
25628diff -urNp linux-2.6.32.45/drivers/ata/pata_sc1200.c linux-2.6.32.45/drivers/ata/pata_sc1200.c
25629--- linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25630+++ linux-2.6.32.45/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25631@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25632 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25633 };
25634
25635-static struct ata_port_operations sc1200_port_ops = {
25636+static const struct ata_port_operations sc1200_port_ops = {
25637 .inherits = &ata_bmdma_port_ops,
25638 .qc_prep = ata_sff_dumb_qc_prep,
25639 .qc_issue = sc1200_qc_issue,
25640diff -urNp linux-2.6.32.45/drivers/ata/pata_scc.c linux-2.6.32.45/drivers/ata/pata_scc.c
25641--- linux-2.6.32.45/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25642+++ linux-2.6.32.45/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25643@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25644 ATA_BMDMA_SHT(DRV_NAME),
25645 };
25646
25647-static struct ata_port_operations scc_pata_ops = {
25648+static const struct ata_port_operations scc_pata_ops = {
25649 .inherits = &ata_bmdma_port_ops,
25650
25651 .set_piomode = scc_set_piomode,
25652diff -urNp linux-2.6.32.45/drivers/ata/pata_sch.c linux-2.6.32.45/drivers/ata/pata_sch.c
25653--- linux-2.6.32.45/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25654+++ linux-2.6.32.45/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25655@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25656 ATA_BMDMA_SHT(DRV_NAME),
25657 };
25658
25659-static struct ata_port_operations sch_pata_ops = {
25660+static const struct ata_port_operations sch_pata_ops = {
25661 .inherits = &ata_bmdma_port_ops,
25662 .cable_detect = ata_cable_unknown,
25663 .set_piomode = sch_set_piomode,
25664diff -urNp linux-2.6.32.45/drivers/ata/pata_serverworks.c linux-2.6.32.45/drivers/ata/pata_serverworks.c
25665--- linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25666+++ linux-2.6.32.45/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25667@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25668 ATA_BMDMA_SHT(DRV_NAME),
25669 };
25670
25671-static struct ata_port_operations serverworks_osb4_port_ops = {
25672+static const struct ata_port_operations serverworks_osb4_port_ops = {
25673 .inherits = &ata_bmdma_port_ops,
25674 .cable_detect = serverworks_cable_detect,
25675 .mode_filter = serverworks_osb4_filter,
25676@@ -307,7 +307,7 @@ static struct ata_port_operations server
25677 .set_dmamode = serverworks_set_dmamode,
25678 };
25679
25680-static struct ata_port_operations serverworks_csb_port_ops = {
25681+static const struct ata_port_operations serverworks_csb_port_ops = {
25682 .inherits = &serverworks_osb4_port_ops,
25683 .mode_filter = serverworks_csb_filter,
25684 };
25685diff -urNp linux-2.6.32.45/drivers/ata/pata_sil680.c linux-2.6.32.45/drivers/ata/pata_sil680.c
25686--- linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25687+++ linux-2.6.32.45/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25688@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25689 ATA_BMDMA_SHT(DRV_NAME),
25690 };
25691
25692-static struct ata_port_operations sil680_port_ops = {
25693+static const struct ata_port_operations sil680_port_ops = {
25694 .inherits = &ata_bmdma32_port_ops,
25695 .cable_detect = sil680_cable_detect,
25696 .set_piomode = sil680_set_piomode,
25697diff -urNp linux-2.6.32.45/drivers/ata/pata_sis.c linux-2.6.32.45/drivers/ata/pata_sis.c
25698--- linux-2.6.32.45/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25699+++ linux-2.6.32.45/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25700@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25701 ATA_BMDMA_SHT(DRV_NAME),
25702 };
25703
25704-static struct ata_port_operations sis_133_for_sata_ops = {
25705+static const struct ata_port_operations sis_133_for_sata_ops = {
25706 .inherits = &ata_bmdma_port_ops,
25707 .set_piomode = sis_133_set_piomode,
25708 .set_dmamode = sis_133_set_dmamode,
25709 .cable_detect = sis_133_cable_detect,
25710 };
25711
25712-static struct ata_port_operations sis_base_ops = {
25713+static const struct ata_port_operations sis_base_ops = {
25714 .inherits = &ata_bmdma_port_ops,
25715 .prereset = sis_pre_reset,
25716 };
25717
25718-static struct ata_port_operations sis_133_ops = {
25719+static const struct ata_port_operations sis_133_ops = {
25720 .inherits = &sis_base_ops,
25721 .set_piomode = sis_133_set_piomode,
25722 .set_dmamode = sis_133_set_dmamode,
25723 .cable_detect = sis_133_cable_detect,
25724 };
25725
25726-static struct ata_port_operations sis_133_early_ops = {
25727+static const struct ata_port_operations sis_133_early_ops = {
25728 .inherits = &sis_base_ops,
25729 .set_piomode = sis_100_set_piomode,
25730 .set_dmamode = sis_133_early_set_dmamode,
25731 .cable_detect = sis_66_cable_detect,
25732 };
25733
25734-static struct ata_port_operations sis_100_ops = {
25735+static const struct ata_port_operations sis_100_ops = {
25736 .inherits = &sis_base_ops,
25737 .set_piomode = sis_100_set_piomode,
25738 .set_dmamode = sis_100_set_dmamode,
25739 .cable_detect = sis_66_cable_detect,
25740 };
25741
25742-static struct ata_port_operations sis_66_ops = {
25743+static const struct ata_port_operations sis_66_ops = {
25744 .inherits = &sis_base_ops,
25745 .set_piomode = sis_old_set_piomode,
25746 .set_dmamode = sis_66_set_dmamode,
25747 .cable_detect = sis_66_cable_detect,
25748 };
25749
25750-static struct ata_port_operations sis_old_ops = {
25751+static const struct ata_port_operations sis_old_ops = {
25752 .inherits = &sis_base_ops,
25753 .set_piomode = sis_old_set_piomode,
25754 .set_dmamode = sis_old_set_dmamode,
25755diff -urNp linux-2.6.32.45/drivers/ata/pata_sl82c105.c linux-2.6.32.45/drivers/ata/pata_sl82c105.c
25756--- linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25757+++ linux-2.6.32.45/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25758@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25759 ATA_BMDMA_SHT(DRV_NAME),
25760 };
25761
25762-static struct ata_port_operations sl82c105_port_ops = {
25763+static const struct ata_port_operations sl82c105_port_ops = {
25764 .inherits = &ata_bmdma_port_ops,
25765 .qc_defer = sl82c105_qc_defer,
25766 .bmdma_start = sl82c105_bmdma_start,
25767diff -urNp linux-2.6.32.45/drivers/ata/pata_triflex.c linux-2.6.32.45/drivers/ata/pata_triflex.c
25768--- linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25769+++ linux-2.6.32.45/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25770@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25771 ATA_BMDMA_SHT(DRV_NAME),
25772 };
25773
25774-static struct ata_port_operations triflex_port_ops = {
25775+static const struct ata_port_operations triflex_port_ops = {
25776 .inherits = &ata_bmdma_port_ops,
25777 .bmdma_start = triflex_bmdma_start,
25778 .bmdma_stop = triflex_bmdma_stop,
25779diff -urNp linux-2.6.32.45/drivers/ata/pata_via.c linux-2.6.32.45/drivers/ata/pata_via.c
25780--- linux-2.6.32.45/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25781+++ linux-2.6.32.45/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25782@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25783 ATA_BMDMA_SHT(DRV_NAME),
25784 };
25785
25786-static struct ata_port_operations via_port_ops = {
25787+static const struct ata_port_operations via_port_ops = {
25788 .inherits = &ata_bmdma_port_ops,
25789 .cable_detect = via_cable_detect,
25790 .set_piomode = via_set_piomode,
25791@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25792 .port_start = via_port_start,
25793 };
25794
25795-static struct ata_port_operations via_port_ops_noirq = {
25796+static const struct ata_port_operations via_port_ops_noirq = {
25797 .inherits = &via_port_ops,
25798 .sff_data_xfer = ata_sff_data_xfer_noirq,
25799 };
25800diff -urNp linux-2.6.32.45/drivers/ata/pata_winbond.c linux-2.6.32.45/drivers/ata/pata_winbond.c
25801--- linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25802+++ linux-2.6.32.45/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25803@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25804 ATA_PIO_SHT(DRV_NAME),
25805 };
25806
25807-static struct ata_port_operations winbond_port_ops = {
25808+static const struct ata_port_operations winbond_port_ops = {
25809 .inherits = &ata_sff_port_ops,
25810 .sff_data_xfer = winbond_data_xfer,
25811 .cable_detect = ata_cable_40wire,
25812diff -urNp linux-2.6.32.45/drivers/ata/pdc_adma.c linux-2.6.32.45/drivers/ata/pdc_adma.c
25813--- linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25814+++ linux-2.6.32.45/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25815@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25816 .dma_boundary = ADMA_DMA_BOUNDARY,
25817 };
25818
25819-static struct ata_port_operations adma_ata_ops = {
25820+static const struct ata_port_operations adma_ata_ops = {
25821 .inherits = &ata_sff_port_ops,
25822
25823 .lost_interrupt = ATA_OP_NULL,
25824diff -urNp linux-2.6.32.45/drivers/ata/sata_fsl.c linux-2.6.32.45/drivers/ata/sata_fsl.c
25825--- linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25826+++ linux-2.6.32.45/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25827@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25828 .dma_boundary = ATA_DMA_BOUNDARY,
25829 };
25830
25831-static struct ata_port_operations sata_fsl_ops = {
25832+static const struct ata_port_operations sata_fsl_ops = {
25833 .inherits = &sata_pmp_port_ops,
25834
25835 .qc_defer = ata_std_qc_defer,
25836diff -urNp linux-2.6.32.45/drivers/ata/sata_inic162x.c linux-2.6.32.45/drivers/ata/sata_inic162x.c
25837--- linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25838+++ linux-2.6.32.45/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25839@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25840 return 0;
25841 }
25842
25843-static struct ata_port_operations inic_port_ops = {
25844+static const struct ata_port_operations inic_port_ops = {
25845 .inherits = &sata_port_ops,
25846
25847 .check_atapi_dma = inic_check_atapi_dma,
25848diff -urNp linux-2.6.32.45/drivers/ata/sata_mv.c linux-2.6.32.45/drivers/ata/sata_mv.c
25849--- linux-2.6.32.45/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25850+++ linux-2.6.32.45/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25851@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25852 .dma_boundary = MV_DMA_BOUNDARY,
25853 };
25854
25855-static struct ata_port_operations mv5_ops = {
25856+static const struct ata_port_operations mv5_ops = {
25857 .inherits = &ata_sff_port_ops,
25858
25859 .lost_interrupt = ATA_OP_NULL,
25860@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25861 .port_stop = mv_port_stop,
25862 };
25863
25864-static struct ata_port_operations mv6_ops = {
25865+static const struct ata_port_operations mv6_ops = {
25866 .inherits = &mv5_ops,
25867 .dev_config = mv6_dev_config,
25868 .scr_read = mv_scr_read,
25869@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25870 .bmdma_status = mv_bmdma_status,
25871 };
25872
25873-static struct ata_port_operations mv_iie_ops = {
25874+static const struct ata_port_operations mv_iie_ops = {
25875 .inherits = &mv6_ops,
25876 .dev_config = ATA_OP_NULL,
25877 .qc_prep = mv_qc_prep_iie,
25878diff -urNp linux-2.6.32.45/drivers/ata/sata_nv.c linux-2.6.32.45/drivers/ata/sata_nv.c
25879--- linux-2.6.32.45/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25880+++ linux-2.6.32.45/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25881@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25882 * cases. Define nv_hardreset() which only kicks in for post-boot
25883 * probing and use it for all variants.
25884 */
25885-static struct ata_port_operations nv_generic_ops = {
25886+static const struct ata_port_operations nv_generic_ops = {
25887 .inherits = &ata_bmdma_port_ops,
25888 .lost_interrupt = ATA_OP_NULL,
25889 .scr_read = nv_scr_read,
25890@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25891 .hardreset = nv_hardreset,
25892 };
25893
25894-static struct ata_port_operations nv_nf2_ops = {
25895+static const struct ata_port_operations nv_nf2_ops = {
25896 .inherits = &nv_generic_ops,
25897 .freeze = nv_nf2_freeze,
25898 .thaw = nv_nf2_thaw,
25899 };
25900
25901-static struct ata_port_operations nv_ck804_ops = {
25902+static const struct ata_port_operations nv_ck804_ops = {
25903 .inherits = &nv_generic_ops,
25904 .freeze = nv_ck804_freeze,
25905 .thaw = nv_ck804_thaw,
25906 .host_stop = nv_ck804_host_stop,
25907 };
25908
25909-static struct ata_port_operations nv_adma_ops = {
25910+static const struct ata_port_operations nv_adma_ops = {
25911 .inherits = &nv_ck804_ops,
25912
25913 .check_atapi_dma = nv_adma_check_atapi_dma,
25914@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25915 .host_stop = nv_adma_host_stop,
25916 };
25917
25918-static struct ata_port_operations nv_swncq_ops = {
25919+static const struct ata_port_operations nv_swncq_ops = {
25920 .inherits = &nv_generic_ops,
25921
25922 .qc_defer = ata_std_qc_defer,
25923diff -urNp linux-2.6.32.45/drivers/ata/sata_promise.c linux-2.6.32.45/drivers/ata/sata_promise.c
25924--- linux-2.6.32.45/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25925+++ linux-2.6.32.45/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25926@@ -195,7 +195,7 @@ static const struct ata_port_operations
25927 .error_handler = pdc_error_handler,
25928 };
25929
25930-static struct ata_port_operations pdc_sata_ops = {
25931+static const struct ata_port_operations pdc_sata_ops = {
25932 .inherits = &pdc_common_ops,
25933 .cable_detect = pdc_sata_cable_detect,
25934 .freeze = pdc_sata_freeze,
25935@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25936
25937 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25938 and ->freeze/thaw that ignore the hotplug controls. */
25939-static struct ata_port_operations pdc_old_sata_ops = {
25940+static const struct ata_port_operations pdc_old_sata_ops = {
25941 .inherits = &pdc_sata_ops,
25942 .freeze = pdc_freeze,
25943 .thaw = pdc_thaw,
25944 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25945 };
25946
25947-static struct ata_port_operations pdc_pata_ops = {
25948+static const struct ata_port_operations pdc_pata_ops = {
25949 .inherits = &pdc_common_ops,
25950 .cable_detect = pdc_pata_cable_detect,
25951 .freeze = pdc_freeze,
25952diff -urNp linux-2.6.32.45/drivers/ata/sata_qstor.c linux-2.6.32.45/drivers/ata/sata_qstor.c
25953--- linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25954+++ linux-2.6.32.45/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25955@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25956 .dma_boundary = QS_DMA_BOUNDARY,
25957 };
25958
25959-static struct ata_port_operations qs_ata_ops = {
25960+static const struct ata_port_operations qs_ata_ops = {
25961 .inherits = &ata_sff_port_ops,
25962
25963 .check_atapi_dma = qs_check_atapi_dma,
25964diff -urNp linux-2.6.32.45/drivers/ata/sata_sil24.c linux-2.6.32.45/drivers/ata/sata_sil24.c
25965--- linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25966+++ linux-2.6.32.45/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25967@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25968 .dma_boundary = ATA_DMA_BOUNDARY,
25969 };
25970
25971-static struct ata_port_operations sil24_ops = {
25972+static const struct ata_port_operations sil24_ops = {
25973 .inherits = &sata_pmp_port_ops,
25974
25975 .qc_defer = sil24_qc_defer,
25976diff -urNp linux-2.6.32.45/drivers/ata/sata_sil.c linux-2.6.32.45/drivers/ata/sata_sil.c
25977--- linux-2.6.32.45/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25978+++ linux-2.6.32.45/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25979@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25980 .sg_tablesize = ATA_MAX_PRD
25981 };
25982
25983-static struct ata_port_operations sil_ops = {
25984+static const struct ata_port_operations sil_ops = {
25985 .inherits = &ata_bmdma32_port_ops,
25986 .dev_config = sil_dev_config,
25987 .set_mode = sil_set_mode,
25988diff -urNp linux-2.6.32.45/drivers/ata/sata_sis.c linux-2.6.32.45/drivers/ata/sata_sis.c
25989--- linux-2.6.32.45/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25990+++ linux-2.6.32.45/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25991@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25992 ATA_BMDMA_SHT(DRV_NAME),
25993 };
25994
25995-static struct ata_port_operations sis_ops = {
25996+static const struct ata_port_operations sis_ops = {
25997 .inherits = &ata_bmdma_port_ops,
25998 .scr_read = sis_scr_read,
25999 .scr_write = sis_scr_write,
26000diff -urNp linux-2.6.32.45/drivers/ata/sata_svw.c linux-2.6.32.45/drivers/ata/sata_svw.c
26001--- linux-2.6.32.45/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
26002+++ linux-2.6.32.45/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
26003@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
26004 };
26005
26006
26007-static struct ata_port_operations k2_sata_ops = {
26008+static const struct ata_port_operations k2_sata_ops = {
26009 .inherits = &ata_bmdma_port_ops,
26010 .sff_tf_load = k2_sata_tf_load,
26011 .sff_tf_read = k2_sata_tf_read,
26012diff -urNp linux-2.6.32.45/drivers/ata/sata_sx4.c linux-2.6.32.45/drivers/ata/sata_sx4.c
26013--- linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
26014+++ linux-2.6.32.45/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
26015@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
26016 };
26017
26018 /* TODO: inherit from base port_ops after converting to new EH */
26019-static struct ata_port_operations pdc_20621_ops = {
26020+static const struct ata_port_operations pdc_20621_ops = {
26021 .inherits = &ata_sff_port_ops,
26022
26023 .check_atapi_dma = pdc_check_atapi_dma,
26024diff -urNp linux-2.6.32.45/drivers/ata/sata_uli.c linux-2.6.32.45/drivers/ata/sata_uli.c
26025--- linux-2.6.32.45/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
26026+++ linux-2.6.32.45/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
26027@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26028 ATA_BMDMA_SHT(DRV_NAME),
26029 };
26030
26031-static struct ata_port_operations uli_ops = {
26032+static const struct ata_port_operations uli_ops = {
26033 .inherits = &ata_bmdma_port_ops,
26034 .scr_read = uli_scr_read,
26035 .scr_write = uli_scr_write,
26036diff -urNp linux-2.6.32.45/drivers/ata/sata_via.c linux-2.6.32.45/drivers/ata/sata_via.c
26037--- linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26038+++ linux-2.6.32.45/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26039@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26040 ATA_BMDMA_SHT(DRV_NAME),
26041 };
26042
26043-static struct ata_port_operations svia_base_ops = {
26044+static const struct ata_port_operations svia_base_ops = {
26045 .inherits = &ata_bmdma_port_ops,
26046 .sff_tf_load = svia_tf_load,
26047 };
26048
26049-static struct ata_port_operations vt6420_sata_ops = {
26050+static const struct ata_port_operations vt6420_sata_ops = {
26051 .inherits = &svia_base_ops,
26052 .freeze = svia_noop_freeze,
26053 .prereset = vt6420_prereset,
26054 .bmdma_start = vt6420_bmdma_start,
26055 };
26056
26057-static struct ata_port_operations vt6421_pata_ops = {
26058+static const struct ata_port_operations vt6421_pata_ops = {
26059 .inherits = &svia_base_ops,
26060 .cable_detect = vt6421_pata_cable_detect,
26061 .set_piomode = vt6421_set_pio_mode,
26062 .set_dmamode = vt6421_set_dma_mode,
26063 };
26064
26065-static struct ata_port_operations vt6421_sata_ops = {
26066+static const struct ata_port_operations vt6421_sata_ops = {
26067 .inherits = &svia_base_ops,
26068 .scr_read = svia_scr_read,
26069 .scr_write = svia_scr_write,
26070 };
26071
26072-static struct ata_port_operations vt8251_ops = {
26073+static const struct ata_port_operations vt8251_ops = {
26074 .inherits = &svia_base_ops,
26075 .hardreset = sata_std_hardreset,
26076 .scr_read = vt8251_scr_read,
26077diff -urNp linux-2.6.32.45/drivers/ata/sata_vsc.c linux-2.6.32.45/drivers/ata/sata_vsc.c
26078--- linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26079+++ linux-2.6.32.45/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26080@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26081 };
26082
26083
26084-static struct ata_port_operations vsc_sata_ops = {
26085+static const struct ata_port_operations vsc_sata_ops = {
26086 .inherits = &ata_bmdma_port_ops,
26087 /* The IRQ handling is not quite standard SFF behaviour so we
26088 cannot use the default lost interrupt handler */
26089diff -urNp linux-2.6.32.45/drivers/atm/adummy.c linux-2.6.32.45/drivers/atm/adummy.c
26090--- linux-2.6.32.45/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26091+++ linux-2.6.32.45/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26092@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26093 vcc->pop(vcc, skb);
26094 else
26095 dev_kfree_skb_any(skb);
26096- atomic_inc(&vcc->stats->tx);
26097+ atomic_inc_unchecked(&vcc->stats->tx);
26098
26099 return 0;
26100 }
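[Editor's note] From here on the ATM drivers switch their VCC statistics updates to the _unchecked atomic helpers. These fields (tx, rx, tx_err, rx_drop and so on) are plain event counters rather than reference counts, so overflow is harmless and the PaX REFCOUNT overflow check is deliberately bypassed for them. Roughly, and only as an illustration of the idea (the real definitions are per-architecture and live in other hunks of this patch, not shown here):

    /* mirrors atomic_t, but is never subject to REFCOUNT overflow detection */
    typedef struct {
            int counter;
    } atomic_unchecked_t;

    /* same semantics as atomic_inc(), minus the overflow trap that
     * CONFIG_PAX_REFCOUNT adds to the checked variant; fallback-style
     * sketch only, not the architecture-specific implementation */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            atomic_inc((atomic_t *)v);
    }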
26101diff -urNp linux-2.6.32.45/drivers/atm/ambassador.c linux-2.6.32.45/drivers/atm/ambassador.c
26102--- linux-2.6.32.45/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26103+++ linux-2.6.32.45/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26104@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26105 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26106
26107 // VC layer stats
26108- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26109+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26110
26111 // free the descriptor
26112 kfree (tx_descr);
26113@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26114 dump_skb ("<<<", vc, skb);
26115
26116 // VC layer stats
26117- atomic_inc(&atm_vcc->stats->rx);
26118+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26119 __net_timestamp(skb);
26120 // end of our responsability
26121 atm_vcc->push (atm_vcc, skb);
26122@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26123 } else {
26124 PRINTK (KERN_INFO, "dropped over-size frame");
26125 // should we count this?
26126- atomic_inc(&atm_vcc->stats->rx_drop);
26127+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26128 }
26129
26130 } else {
26131@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26132 }
26133
26134 if (check_area (skb->data, skb->len)) {
26135- atomic_inc(&atm_vcc->stats->tx_err);
26136+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26137 return -ENOMEM; // ?
26138 }
26139
26140diff -urNp linux-2.6.32.45/drivers/atm/atmtcp.c linux-2.6.32.45/drivers/atm/atmtcp.c
26141--- linux-2.6.32.45/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26142+++ linux-2.6.32.45/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26143@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26144 if (vcc->pop) vcc->pop(vcc,skb);
26145 else dev_kfree_skb(skb);
26146 if (dev_data) return 0;
26147- atomic_inc(&vcc->stats->tx_err);
26148+ atomic_inc_unchecked(&vcc->stats->tx_err);
26149 return -ENOLINK;
26150 }
26151 size = skb->len+sizeof(struct atmtcp_hdr);
26152@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26153 if (!new_skb) {
26154 if (vcc->pop) vcc->pop(vcc,skb);
26155 else dev_kfree_skb(skb);
26156- atomic_inc(&vcc->stats->tx_err);
26157+ atomic_inc_unchecked(&vcc->stats->tx_err);
26158 return -ENOBUFS;
26159 }
26160 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26161@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26162 if (vcc->pop) vcc->pop(vcc,skb);
26163 else dev_kfree_skb(skb);
26164 out_vcc->push(out_vcc,new_skb);
26165- atomic_inc(&vcc->stats->tx);
26166- atomic_inc(&out_vcc->stats->rx);
26167+ atomic_inc_unchecked(&vcc->stats->tx);
26168+ atomic_inc_unchecked(&out_vcc->stats->rx);
26169 return 0;
26170 }
26171
26172@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26173 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26174 read_unlock(&vcc_sklist_lock);
26175 if (!out_vcc) {
26176- atomic_inc(&vcc->stats->tx_err);
26177+ atomic_inc_unchecked(&vcc->stats->tx_err);
26178 goto done;
26179 }
26180 skb_pull(skb,sizeof(struct atmtcp_hdr));
26181@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26182 __net_timestamp(new_skb);
26183 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26184 out_vcc->push(out_vcc,new_skb);
26185- atomic_inc(&vcc->stats->tx);
26186- atomic_inc(&out_vcc->stats->rx);
26187+ atomic_inc_unchecked(&vcc->stats->tx);
26188+ atomic_inc_unchecked(&out_vcc->stats->rx);
26189 done:
26190 if (vcc->pop) vcc->pop(vcc,skb);
26191 else dev_kfree_skb(skb);
26192diff -urNp linux-2.6.32.45/drivers/atm/eni.c linux-2.6.32.45/drivers/atm/eni.c
26193--- linux-2.6.32.45/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26194+++ linux-2.6.32.45/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26195@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26196 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26197 vcc->dev->number);
26198 length = 0;
26199- atomic_inc(&vcc->stats->rx_err);
26200+ atomic_inc_unchecked(&vcc->stats->rx_err);
26201 }
26202 else {
26203 length = ATM_CELL_SIZE-1; /* no HEC */
26204@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26205 size);
26206 }
26207 eff = length = 0;
26208- atomic_inc(&vcc->stats->rx_err);
26209+ atomic_inc_unchecked(&vcc->stats->rx_err);
26210 }
26211 else {
26212 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26213@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26214 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26215 vcc->dev->number,vcc->vci,length,size << 2,descr);
26216 length = eff = 0;
26217- atomic_inc(&vcc->stats->rx_err);
26218+ atomic_inc_unchecked(&vcc->stats->rx_err);
26219 }
26220 }
26221 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26222@@ -770,7 +770,7 @@ rx_dequeued++;
26223 vcc->push(vcc,skb);
26224 pushed++;
26225 }
26226- atomic_inc(&vcc->stats->rx);
26227+ atomic_inc_unchecked(&vcc->stats->rx);
26228 }
26229 wake_up(&eni_dev->rx_wait);
26230 }
26231@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26232 PCI_DMA_TODEVICE);
26233 if (vcc->pop) vcc->pop(vcc,skb);
26234 else dev_kfree_skb_irq(skb);
26235- atomic_inc(&vcc->stats->tx);
26236+ atomic_inc_unchecked(&vcc->stats->tx);
26237 wake_up(&eni_dev->tx_wait);
26238 dma_complete++;
26239 }
26240diff -urNp linux-2.6.32.45/drivers/atm/firestream.c linux-2.6.32.45/drivers/atm/firestream.c
26241--- linux-2.6.32.45/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26242+++ linux-2.6.32.45/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26243@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26244 }
26245 }
26246
26247- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26248+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26249
26250 fs_dprintk (FS_DEBUG_TXMEM, "i");
26251 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26252@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26253 #endif
26254 skb_put (skb, qe->p1 & 0xffff);
26255 ATM_SKB(skb)->vcc = atm_vcc;
26256- atomic_inc(&atm_vcc->stats->rx);
26257+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26258 __net_timestamp(skb);
26259 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26260 atm_vcc->push (atm_vcc, skb);
26261@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26262 kfree (pe);
26263 }
26264 if (atm_vcc)
26265- atomic_inc(&atm_vcc->stats->rx_drop);
26266+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26267 break;
26268 case 0x1f: /* Reassembly abort: no buffers. */
26269 /* Silently increment error counter. */
26270 if (atm_vcc)
26271- atomic_inc(&atm_vcc->stats->rx_drop);
26272+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26273 break;
26274 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26275 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26276diff -urNp linux-2.6.32.45/drivers/atm/fore200e.c linux-2.6.32.45/drivers/atm/fore200e.c
26277--- linux-2.6.32.45/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26278+++ linux-2.6.32.45/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26279@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26280 #endif
26281 /* check error condition */
26282 if (*entry->status & STATUS_ERROR)
26283- atomic_inc(&vcc->stats->tx_err);
26284+ atomic_inc_unchecked(&vcc->stats->tx_err);
26285 else
26286- atomic_inc(&vcc->stats->tx);
26287+ atomic_inc_unchecked(&vcc->stats->tx);
26288 }
26289 }
26290
26291@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26292 if (skb == NULL) {
26293 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26294
26295- atomic_inc(&vcc->stats->rx_drop);
26296+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26297 return -ENOMEM;
26298 }
26299
26300@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26301
26302 dev_kfree_skb_any(skb);
26303
26304- atomic_inc(&vcc->stats->rx_drop);
26305+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26306 return -ENOMEM;
26307 }
26308
26309 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26310
26311 vcc->push(vcc, skb);
26312- atomic_inc(&vcc->stats->rx);
26313+ atomic_inc_unchecked(&vcc->stats->rx);
26314
26315 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26316
26317@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26318 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26319 fore200e->atm_dev->number,
26320 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26321- atomic_inc(&vcc->stats->rx_err);
26322+ atomic_inc_unchecked(&vcc->stats->rx_err);
26323 }
26324 }
26325
26326@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26327 goto retry_here;
26328 }
26329
26330- atomic_inc(&vcc->stats->tx_err);
26331+ atomic_inc_unchecked(&vcc->stats->tx_err);
26332
26333 fore200e->tx_sat++;
26334 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26335diff -urNp linux-2.6.32.45/drivers/atm/he.c linux-2.6.32.45/drivers/atm/he.c
26336--- linux-2.6.32.45/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26337+++ linux-2.6.32.45/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26338@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26339
26340 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26341 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26342- atomic_inc(&vcc->stats->rx_drop);
26343+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26344 goto return_host_buffers;
26345 }
26346
26347@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26348 RBRQ_LEN_ERR(he_dev->rbrq_head)
26349 ? "LEN_ERR" : "",
26350 vcc->vpi, vcc->vci);
26351- atomic_inc(&vcc->stats->rx_err);
26352+ atomic_inc_unchecked(&vcc->stats->rx_err);
26353 goto return_host_buffers;
26354 }
26355
26356@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26357 vcc->push(vcc, skb);
26358 spin_lock(&he_dev->global_lock);
26359
26360- atomic_inc(&vcc->stats->rx);
26361+ atomic_inc_unchecked(&vcc->stats->rx);
26362
26363 return_host_buffers:
26364 ++pdus_assembled;
26365@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26366 tpd->vcc->pop(tpd->vcc, tpd->skb);
26367 else
26368 dev_kfree_skb_any(tpd->skb);
26369- atomic_inc(&tpd->vcc->stats->tx_err);
26370+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26371 }
26372 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26373 return;
26374@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26375 vcc->pop(vcc, skb);
26376 else
26377 dev_kfree_skb_any(skb);
26378- atomic_inc(&vcc->stats->tx_err);
26379+ atomic_inc_unchecked(&vcc->stats->tx_err);
26380 return -EINVAL;
26381 }
26382
26383@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26384 vcc->pop(vcc, skb);
26385 else
26386 dev_kfree_skb_any(skb);
26387- atomic_inc(&vcc->stats->tx_err);
26388+ atomic_inc_unchecked(&vcc->stats->tx_err);
26389 return -EINVAL;
26390 }
26391 #endif
26392@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26393 vcc->pop(vcc, skb);
26394 else
26395 dev_kfree_skb_any(skb);
26396- atomic_inc(&vcc->stats->tx_err);
26397+ atomic_inc_unchecked(&vcc->stats->tx_err);
26398 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26399 return -ENOMEM;
26400 }
26401@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26402 vcc->pop(vcc, skb);
26403 else
26404 dev_kfree_skb_any(skb);
26405- atomic_inc(&vcc->stats->tx_err);
26406+ atomic_inc_unchecked(&vcc->stats->tx_err);
26407 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26408 return -ENOMEM;
26409 }
26410@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26411 __enqueue_tpd(he_dev, tpd, cid);
26412 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26413
26414- atomic_inc(&vcc->stats->tx);
26415+ atomic_inc_unchecked(&vcc->stats->tx);
26416
26417 return 0;
26418 }
26419diff -urNp linux-2.6.32.45/drivers/atm/horizon.c linux-2.6.32.45/drivers/atm/horizon.c
26420--- linux-2.6.32.45/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26421+++ linux-2.6.32.45/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26422@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26423 {
26424 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26425 // VC layer stats
26426- atomic_inc(&vcc->stats->rx);
26427+ atomic_inc_unchecked(&vcc->stats->rx);
26428 __net_timestamp(skb);
26429 // end of our responsability
26430 vcc->push (vcc, skb);
26431@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26432 dev->tx_iovec = NULL;
26433
26434 // VC layer stats
26435- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26436+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26437
26438 // free the skb
26439 hrz_kfree_skb (skb);
26440diff -urNp linux-2.6.32.45/drivers/atm/idt77252.c linux-2.6.32.45/drivers/atm/idt77252.c
26441--- linux-2.6.32.45/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26442+++ linux-2.6.32.45/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26443@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26444 else
26445 dev_kfree_skb(skb);
26446
26447- atomic_inc(&vcc->stats->tx);
26448+ atomic_inc_unchecked(&vcc->stats->tx);
26449 }
26450
26451 atomic_dec(&scq->used);
26452@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26453 if ((sb = dev_alloc_skb(64)) == NULL) {
26454 printk("%s: Can't allocate buffers for aal0.\n",
26455 card->name);
26456- atomic_add(i, &vcc->stats->rx_drop);
26457+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26458 break;
26459 }
26460 if (!atm_charge(vcc, sb->truesize)) {
26461 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26462 card->name);
26463- atomic_add(i - 1, &vcc->stats->rx_drop);
26464+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26465 dev_kfree_skb(sb);
26466 break;
26467 }
26468@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26469 ATM_SKB(sb)->vcc = vcc;
26470 __net_timestamp(sb);
26471 vcc->push(vcc, sb);
26472- atomic_inc(&vcc->stats->rx);
26473+ atomic_inc_unchecked(&vcc->stats->rx);
26474
26475 cell += ATM_CELL_PAYLOAD;
26476 }
26477@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26478 "(CDC: %08x)\n",
26479 card->name, len, rpp->len, readl(SAR_REG_CDC));
26480 recycle_rx_pool_skb(card, rpp);
26481- atomic_inc(&vcc->stats->rx_err);
26482+ atomic_inc_unchecked(&vcc->stats->rx_err);
26483 return;
26484 }
26485 if (stat & SAR_RSQE_CRC) {
26486 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26487 recycle_rx_pool_skb(card, rpp);
26488- atomic_inc(&vcc->stats->rx_err);
26489+ atomic_inc_unchecked(&vcc->stats->rx_err);
26490 return;
26491 }
26492 if (skb_queue_len(&rpp->queue) > 1) {
26493@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26494 RXPRINTK("%s: Can't alloc RX skb.\n",
26495 card->name);
26496 recycle_rx_pool_skb(card, rpp);
26497- atomic_inc(&vcc->stats->rx_err);
26498+ atomic_inc_unchecked(&vcc->stats->rx_err);
26499 return;
26500 }
26501 if (!atm_charge(vcc, skb->truesize)) {
26502@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26503 __net_timestamp(skb);
26504
26505 vcc->push(vcc, skb);
26506- atomic_inc(&vcc->stats->rx);
26507+ atomic_inc_unchecked(&vcc->stats->rx);
26508
26509 return;
26510 }
26511@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26512 __net_timestamp(skb);
26513
26514 vcc->push(vcc, skb);
26515- atomic_inc(&vcc->stats->rx);
26516+ atomic_inc_unchecked(&vcc->stats->rx);
26517
26518 if (skb->truesize > SAR_FB_SIZE_3)
26519 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26520@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26521 if (vcc->qos.aal != ATM_AAL0) {
26522 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26523 card->name, vpi, vci);
26524- atomic_inc(&vcc->stats->rx_drop);
26525+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26526 goto drop;
26527 }
26528
26529 if ((sb = dev_alloc_skb(64)) == NULL) {
26530 printk("%s: Can't allocate buffers for AAL0.\n",
26531 card->name);
26532- atomic_inc(&vcc->stats->rx_err);
26533+ atomic_inc_unchecked(&vcc->stats->rx_err);
26534 goto drop;
26535 }
26536
26537@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26538 ATM_SKB(sb)->vcc = vcc;
26539 __net_timestamp(sb);
26540 vcc->push(vcc, sb);
26541- atomic_inc(&vcc->stats->rx);
26542+ atomic_inc_unchecked(&vcc->stats->rx);
26543
26544 drop:
26545 skb_pull(queue, 64);
26546@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26547
26548 if (vc == NULL) {
26549 printk("%s: NULL connection in send().\n", card->name);
26550- atomic_inc(&vcc->stats->tx_err);
26551+ atomic_inc_unchecked(&vcc->stats->tx_err);
26552 dev_kfree_skb(skb);
26553 return -EINVAL;
26554 }
26555 if (!test_bit(VCF_TX, &vc->flags)) {
26556 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26557- atomic_inc(&vcc->stats->tx_err);
26558+ atomic_inc_unchecked(&vcc->stats->tx_err);
26559 dev_kfree_skb(skb);
26560 return -EINVAL;
26561 }
26562@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26563 break;
26564 default:
26565 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26566- atomic_inc(&vcc->stats->tx_err);
26567+ atomic_inc_unchecked(&vcc->stats->tx_err);
26568 dev_kfree_skb(skb);
26569 return -EINVAL;
26570 }
26571
26572 if (skb_shinfo(skb)->nr_frags != 0) {
26573 printk("%s: No scatter-gather yet.\n", card->name);
26574- atomic_inc(&vcc->stats->tx_err);
26575+ atomic_inc_unchecked(&vcc->stats->tx_err);
26576 dev_kfree_skb(skb);
26577 return -EINVAL;
26578 }
26579@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26580
26581 err = queue_skb(card, vc, skb, oam);
26582 if (err) {
26583- atomic_inc(&vcc->stats->tx_err);
26584+ atomic_inc_unchecked(&vcc->stats->tx_err);
26585 dev_kfree_skb(skb);
26586 return err;
26587 }
26588@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26589 skb = dev_alloc_skb(64);
26590 if (!skb) {
26591 printk("%s: Out of memory in send_oam().\n", card->name);
26592- atomic_inc(&vcc->stats->tx_err);
26593+ atomic_inc_unchecked(&vcc->stats->tx_err);
26594 return -ENOMEM;
26595 }
26596 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26597diff -urNp linux-2.6.32.45/drivers/atm/iphase.c linux-2.6.32.45/drivers/atm/iphase.c
26598--- linux-2.6.32.45/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26599+++ linux-2.6.32.45/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26600@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26601 status = (u_short) (buf_desc_ptr->desc_mode);
26602 if (status & (RX_CER | RX_PTE | RX_OFL))
26603 {
26604- atomic_inc(&vcc->stats->rx_err);
26605+ atomic_inc_unchecked(&vcc->stats->rx_err);
26606 IF_ERR(printk("IA: bad packet, dropping it");)
26607 if (status & RX_CER) {
26608 IF_ERR(printk(" cause: packet CRC error\n");)
26609@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26610 len = dma_addr - buf_addr;
26611 if (len > iadev->rx_buf_sz) {
26612 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26613- atomic_inc(&vcc->stats->rx_err);
26614+ atomic_inc_unchecked(&vcc->stats->rx_err);
26615 goto out_free_desc;
26616 }
26617
26618@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26619 ia_vcc = INPH_IA_VCC(vcc);
26620 if (ia_vcc == NULL)
26621 {
26622- atomic_inc(&vcc->stats->rx_err);
26623+ atomic_inc_unchecked(&vcc->stats->rx_err);
26624 dev_kfree_skb_any(skb);
26625 atm_return(vcc, atm_guess_pdu2truesize(len));
26626 goto INCR_DLE;
26627@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26628 if ((length > iadev->rx_buf_sz) || (length >
26629 (skb->len - sizeof(struct cpcs_trailer))))
26630 {
26631- atomic_inc(&vcc->stats->rx_err);
26632+ atomic_inc_unchecked(&vcc->stats->rx_err);
26633 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26634 length, skb->len);)
26635 dev_kfree_skb_any(skb);
26636@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26637
26638 IF_RX(printk("rx_dle_intr: skb push");)
26639 vcc->push(vcc,skb);
26640- atomic_inc(&vcc->stats->rx);
26641+ atomic_inc_unchecked(&vcc->stats->rx);
26642 iadev->rx_pkt_cnt++;
26643 }
26644 INCR_DLE:
26645@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26646 {
26647 struct k_sonet_stats *stats;
26648 stats = &PRIV(_ia_dev[board])->sonet_stats;
26649- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26650- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26651- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26652- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26653- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26654- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26655- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26656- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26657- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26658+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26659+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26660+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26661+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26662+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26663+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26664+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26665+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26666+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26667 }
26668 ia_cmds.status = 0;
26669 break;
26670@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26671 if ((desc == 0) || (desc > iadev->num_tx_desc))
26672 {
26673 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26674- atomic_inc(&vcc->stats->tx);
26675+ atomic_inc_unchecked(&vcc->stats->tx);
26676 if (vcc->pop)
26677 vcc->pop(vcc, skb);
26678 else
26679@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26680 ATM_DESC(skb) = vcc->vci;
26681 skb_queue_tail(&iadev->tx_dma_q, skb);
26682
26683- atomic_inc(&vcc->stats->tx);
26684+ atomic_inc_unchecked(&vcc->stats->tx);
26685 iadev->tx_pkt_cnt++;
26686 /* Increment transaction counter */
26687 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26688
26689 #if 0
26690 /* add flow control logic */
26691- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26692+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26693 if (iavcc->vc_desc_cnt > 10) {
26694 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26695 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26696diff -urNp linux-2.6.32.45/drivers/atm/lanai.c linux-2.6.32.45/drivers/atm/lanai.c
26697--- linux-2.6.32.45/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26698+++ linux-2.6.32.45/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26699@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26700 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26701 lanai_endtx(lanai, lvcc);
26702 lanai_free_skb(lvcc->tx.atmvcc, skb);
26703- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26704+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26705 }
26706
26707 /* Try to fill the buffer - don't call unless there is backlog */
26708@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26709 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26710 __net_timestamp(skb);
26711 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26712- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26713+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26714 out:
26715 lvcc->rx.buf.ptr = end;
26716 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26717@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26718 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26719 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26720 lanai->stats.service_rxnotaal5++;
26721- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26722+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26723 return 0;
26724 }
26725 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26726@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26727 int bytes;
26728 read_unlock(&vcc_sklist_lock);
26729 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26730- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26731+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26732 lvcc->stats.x.aal5.service_trash++;
26733 bytes = (SERVICE_GET_END(s) * 16) -
26734 (((unsigned long) lvcc->rx.buf.ptr) -
26735@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26736 }
26737 if (s & SERVICE_STREAM) {
26738 read_unlock(&vcc_sklist_lock);
26739- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26740+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26741 lvcc->stats.x.aal5.service_stream++;
26742 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26743 "PDU on VCI %d!\n", lanai->number, vci);
26744@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26745 return 0;
26746 }
26747 DPRINTK("got rx crc error on vci %d\n", vci);
26748- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26749+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26750 lvcc->stats.x.aal5.service_rxcrc++;
26751 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26752 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26753diff -urNp linux-2.6.32.45/drivers/atm/nicstar.c linux-2.6.32.45/drivers/atm/nicstar.c
26754--- linux-2.6.32.45/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26755+++ linux-2.6.32.45/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26756@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26757 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26758 {
26759 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26760- atomic_inc(&vcc->stats->tx_err);
26761+ atomic_inc_unchecked(&vcc->stats->tx_err);
26762 dev_kfree_skb_any(skb);
26763 return -EINVAL;
26764 }
26765@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26766 if (!vc->tx)
26767 {
26768 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26769- atomic_inc(&vcc->stats->tx_err);
26770+ atomic_inc_unchecked(&vcc->stats->tx_err);
26771 dev_kfree_skb_any(skb);
26772 return -EINVAL;
26773 }
26774@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26775 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26776 {
26777 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26778- atomic_inc(&vcc->stats->tx_err);
26779+ atomic_inc_unchecked(&vcc->stats->tx_err);
26780 dev_kfree_skb_any(skb);
26781 return -EINVAL;
26782 }
26783@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26784 if (skb_shinfo(skb)->nr_frags != 0)
26785 {
26786 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26787- atomic_inc(&vcc->stats->tx_err);
26788+ atomic_inc_unchecked(&vcc->stats->tx_err);
26789 dev_kfree_skb_any(skb);
26790 return -EINVAL;
26791 }
26792@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26793
26794 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26795 {
26796- atomic_inc(&vcc->stats->tx_err);
26797+ atomic_inc_unchecked(&vcc->stats->tx_err);
26798 dev_kfree_skb_any(skb);
26799 return -EIO;
26800 }
26801- atomic_inc(&vcc->stats->tx);
26802+ atomic_inc_unchecked(&vcc->stats->tx);
26803
26804 return 0;
26805 }
26806@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26807 {
26808 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26809 card->index);
26810- atomic_add(i,&vcc->stats->rx_drop);
26811+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26812 break;
26813 }
26814 if (!atm_charge(vcc, sb->truesize))
26815 {
26816 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26817 card->index);
26818- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26819+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26820 dev_kfree_skb_any(sb);
26821 break;
26822 }
26823@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26824 ATM_SKB(sb)->vcc = vcc;
26825 __net_timestamp(sb);
26826 vcc->push(vcc, sb);
26827- atomic_inc(&vcc->stats->rx);
26828+ atomic_inc_unchecked(&vcc->stats->rx);
26829 cell += ATM_CELL_PAYLOAD;
26830 }
26831
26832@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26833 if (iovb == NULL)
26834 {
26835 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26836- atomic_inc(&vcc->stats->rx_drop);
26837+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26838 recycle_rx_buf(card, skb);
26839 return;
26840 }
26841@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26842 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26843 {
26844 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26845- atomic_inc(&vcc->stats->rx_err);
26846+ atomic_inc_unchecked(&vcc->stats->rx_err);
26847 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26848 NS_SKB(iovb)->iovcnt = 0;
26849 iovb->len = 0;
26850@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26851 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26852 card->index);
26853 which_list(card, skb);
26854- atomic_inc(&vcc->stats->rx_err);
26855+ atomic_inc_unchecked(&vcc->stats->rx_err);
26856 recycle_rx_buf(card, skb);
26857 vc->rx_iov = NULL;
26858 recycle_iov_buf(card, iovb);
26859@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26860 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26861 card->index);
26862 which_list(card, skb);
26863- atomic_inc(&vcc->stats->rx_err);
26864+ atomic_inc_unchecked(&vcc->stats->rx_err);
26865 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26866 NS_SKB(iovb)->iovcnt);
26867 vc->rx_iov = NULL;
26868@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26869 printk(" - PDU size mismatch.\n");
26870 else
26871 printk(".\n");
26872- atomic_inc(&vcc->stats->rx_err);
26873+ atomic_inc_unchecked(&vcc->stats->rx_err);
26874 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26875 NS_SKB(iovb)->iovcnt);
26876 vc->rx_iov = NULL;
26877@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26878 if (!atm_charge(vcc, skb->truesize))
26879 {
26880 push_rxbufs(card, skb);
26881- atomic_inc(&vcc->stats->rx_drop);
26882+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26883 }
26884 else
26885 {
26886@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26887 ATM_SKB(skb)->vcc = vcc;
26888 __net_timestamp(skb);
26889 vcc->push(vcc, skb);
26890- atomic_inc(&vcc->stats->rx);
26891+ atomic_inc_unchecked(&vcc->stats->rx);
26892 }
26893 }
26894 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26895@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26896 if (!atm_charge(vcc, sb->truesize))
26897 {
26898 push_rxbufs(card, sb);
26899- atomic_inc(&vcc->stats->rx_drop);
26900+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26901 }
26902 else
26903 {
26904@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26905 ATM_SKB(sb)->vcc = vcc;
26906 __net_timestamp(sb);
26907 vcc->push(vcc, sb);
26908- atomic_inc(&vcc->stats->rx);
26909+ atomic_inc_unchecked(&vcc->stats->rx);
26910 }
26911
26912 push_rxbufs(card, skb);
26913@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26914 if (!atm_charge(vcc, skb->truesize))
26915 {
26916 push_rxbufs(card, skb);
26917- atomic_inc(&vcc->stats->rx_drop);
26918+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26919 }
26920 else
26921 {
26922@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26923 ATM_SKB(skb)->vcc = vcc;
26924 __net_timestamp(skb);
26925 vcc->push(vcc, skb);
26926- atomic_inc(&vcc->stats->rx);
26927+ atomic_inc_unchecked(&vcc->stats->rx);
26928 }
26929
26930 push_rxbufs(card, sb);
26931@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26932 if (hb == NULL)
26933 {
26934 printk("nicstar%d: Out of huge buffers.\n", card->index);
26935- atomic_inc(&vcc->stats->rx_drop);
26936+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26937 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26938 NS_SKB(iovb)->iovcnt);
26939 vc->rx_iov = NULL;
26940@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26941 }
26942 else
26943 dev_kfree_skb_any(hb);
26944- atomic_inc(&vcc->stats->rx_drop);
26945+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26946 }
26947 else
26948 {
26949@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26950 #endif /* NS_USE_DESTRUCTORS */
26951 __net_timestamp(hb);
26952 vcc->push(vcc, hb);
26953- atomic_inc(&vcc->stats->rx);
26954+ atomic_inc_unchecked(&vcc->stats->rx);
26955 }
26956 }
26957
26958diff -urNp linux-2.6.32.45/drivers/atm/solos-pci.c linux-2.6.32.45/drivers/atm/solos-pci.c
26959--- linux-2.6.32.45/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26960+++ linux-2.6.32.45/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26961@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26962 }
26963 atm_charge(vcc, skb->truesize);
26964 vcc->push(vcc, skb);
26965- atomic_inc(&vcc->stats->rx);
26966+ atomic_inc_unchecked(&vcc->stats->rx);
26967 break;
26968
26969 case PKT_STATUS:
26970@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26971 char msg[500];
26972 char item[10];
26973
26974+ pax_track_stack();
26975+
26976 len = buf->len;
26977 for (i = 0; i < len; i++){
26978 if(i % 8 == 0)
26979@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26980 vcc = SKB_CB(oldskb)->vcc;
26981
26982 if (vcc) {
26983- atomic_inc(&vcc->stats->tx);
26984+ atomic_inc_unchecked(&vcc->stats->tx);
26985 solos_pop(vcc, oldskb);
26986 } else
26987 dev_kfree_skb_irq(oldskb);
26988diff -urNp linux-2.6.32.45/drivers/atm/suni.c linux-2.6.32.45/drivers/atm/suni.c
26989--- linux-2.6.32.45/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26990+++ linux-2.6.32.45/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26991@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26992
26993
26994 #define ADD_LIMITED(s,v) \
26995- atomic_add((v),&stats->s); \
26996- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26997+ atomic_add_unchecked((v),&stats->s); \
26998+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26999
27000
27001 static void suni_hz(unsigned long from_timer)
27002diff -urNp linux-2.6.32.45/drivers/atm/uPD98402.c linux-2.6.32.45/drivers/atm/uPD98402.c
27003--- linux-2.6.32.45/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
27004+++ linux-2.6.32.45/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
27005@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
27006 struct sonet_stats tmp;
27007 int error = 0;
27008
27009- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27010+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27011 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27012 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27013 if (zero && !error) {
27014@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
27015
27016
27017 #define ADD_LIMITED(s,v) \
27018- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27019- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27020- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27021+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27022+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27023+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27024
27025
27026 static void stat_event(struct atm_dev *dev)
27027@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27028 if (reason & uPD98402_INT_PFM) stat_event(dev);
27029 if (reason & uPD98402_INT_PCO) {
27030 (void) GET(PCOCR); /* clear interrupt cause */
27031- atomic_add(GET(HECCT),
27032+ atomic_add_unchecked(GET(HECCT),
27033 &PRIV(dev)->sonet_stats.uncorr_hcs);
27034 }
27035 if ((reason & uPD98402_INT_RFO) &&
27036@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27037 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27038 uPD98402_INT_LOS),PIMR); /* enable them */
27039 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27040- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27041- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27042- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27043+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27044+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27045+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27046 return 0;
27047 }
27048
27049diff -urNp linux-2.6.32.45/drivers/atm/zatm.c linux-2.6.32.45/drivers/atm/zatm.c
27050--- linux-2.6.32.45/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27051+++ linux-2.6.32.45/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27052@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27053 }
27054 if (!size) {
27055 dev_kfree_skb_irq(skb);
27056- if (vcc) atomic_inc(&vcc->stats->rx_err);
27057+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27058 continue;
27059 }
27060 if (!atm_charge(vcc,skb->truesize)) {
27061@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27062 skb->len = size;
27063 ATM_SKB(skb)->vcc = vcc;
27064 vcc->push(vcc,skb);
27065- atomic_inc(&vcc->stats->rx);
27066+ atomic_inc_unchecked(&vcc->stats->rx);
27067 }
27068 zout(pos & 0xffff,MTA(mbx));
27069 #if 0 /* probably a stupid idea */
27070@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27071 skb_queue_head(&zatm_vcc->backlog,skb);
27072 break;
27073 }
27074- atomic_inc(&vcc->stats->tx);
27075+ atomic_inc_unchecked(&vcc->stats->tx);
27076 wake_up(&zatm_vcc->tx_wait);
27077 }
27078
27079diff -urNp linux-2.6.32.45/drivers/base/bus.c linux-2.6.32.45/drivers/base/bus.c
27080--- linux-2.6.32.45/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27081+++ linux-2.6.32.45/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27082@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27083 return ret;
27084 }
27085
27086-static struct sysfs_ops driver_sysfs_ops = {
27087+static const struct sysfs_ops driver_sysfs_ops = {
27088 .show = drv_attr_show,
27089 .store = drv_attr_store,
27090 };
27091@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27092 return ret;
27093 }
27094
27095-static struct sysfs_ops bus_sysfs_ops = {
27096+static const struct sysfs_ops bus_sysfs_ops = {
27097 .show = bus_attr_show,
27098 .store = bus_attr_store,
27099 };
27100@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27101 return 0;
27102 }
27103
27104-static struct kset_uevent_ops bus_uevent_ops = {
27105+static const struct kset_uevent_ops bus_uevent_ops = {
27106 .filter = bus_uevent_filter,
27107 };
27108
27109diff -urNp linux-2.6.32.45/drivers/base/class.c linux-2.6.32.45/drivers/base/class.c
27110--- linux-2.6.32.45/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27111+++ linux-2.6.32.45/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27112@@ -63,7 +63,7 @@ static void class_release(struct kobject
27113 kfree(cp);
27114 }
27115
27116-static struct sysfs_ops class_sysfs_ops = {
27117+static const struct sysfs_ops class_sysfs_ops = {
27118 .show = class_attr_show,
27119 .store = class_attr_store,
27120 };
27121diff -urNp linux-2.6.32.45/drivers/base/core.c linux-2.6.32.45/drivers/base/core.c
27122--- linux-2.6.32.45/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27123+++ linux-2.6.32.45/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27124@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27125 return ret;
27126 }
27127
27128-static struct sysfs_ops dev_sysfs_ops = {
27129+static const struct sysfs_ops dev_sysfs_ops = {
27130 .show = dev_attr_show,
27131 .store = dev_attr_store,
27132 };
27133@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27134 return retval;
27135 }
27136
27137-static struct kset_uevent_ops device_uevent_ops = {
27138+static const struct kset_uevent_ops device_uevent_ops = {
27139 .filter = dev_uevent_filter,
27140 .name = dev_uevent_name,
27141 .uevent = dev_uevent,
27142diff -urNp linux-2.6.32.45/drivers/base/memory.c linux-2.6.32.45/drivers/base/memory.c
27143--- linux-2.6.32.45/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27144+++ linux-2.6.32.45/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27145@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27146 return retval;
27147 }
27148
27149-static struct kset_uevent_ops memory_uevent_ops = {
27150+static const struct kset_uevent_ops memory_uevent_ops = {
27151 .name = memory_uevent_name,
27152 .uevent = memory_uevent,
27153 };
27154diff -urNp linux-2.6.32.45/drivers/base/sys.c linux-2.6.32.45/drivers/base/sys.c
27155--- linux-2.6.32.45/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27156+++ linux-2.6.32.45/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27157@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27158 return -EIO;
27159 }
27160
27161-static struct sysfs_ops sysfs_ops = {
27162+static const struct sysfs_ops sysfs_ops = {
27163 .show = sysdev_show,
27164 .store = sysdev_store,
27165 };
27166@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27167 return -EIO;
27168 }
27169
27170-static struct sysfs_ops sysfs_class_ops = {
27171+static const struct sysfs_ops sysfs_class_ops = {
27172 .show = sysdev_class_show,
27173 .store = sysdev_class_store,
27174 };
27175diff -urNp linux-2.6.32.45/drivers/block/cciss.c linux-2.6.32.45/drivers/block/cciss.c
27176--- linux-2.6.32.45/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27177+++ linux-2.6.32.45/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27178@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27179 int err;
27180 u32 cp;
27181
27182+ memset(&arg64, 0, sizeof(arg64));
27183+
27184 err = 0;
27185 err |=
27186 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27187@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27188 /* Wait (up to 20 seconds) for a command to complete */
27189
27190 for (i = 20 * HZ; i > 0; i--) {
27191- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27192+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27193 if (done == FIFO_EMPTY)
27194 schedule_timeout_uninterruptible(1);
27195 else
27196@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27197 resend_cmd1:
27198
27199 /* Disable interrupt on the board. */
27200- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27201+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27202
27203 /* Make sure there is room in the command FIFO */
27204 /* Actually it should be completely empty at this time */
27205@@ -2884,13 +2886,13 @@ resend_cmd1:
27206 /* tape side of the driver. */
27207 for (i = 200000; i > 0; i--) {
27208 /* if fifo isn't full go */
27209- if (!(h->access.fifo_full(h)))
27210+ if (!(h->access->fifo_full(h)))
27211 break;
27212 udelay(10);
27213 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27214 " waiting!\n", h->ctlr);
27215 }
27216- h->access.submit_command(h, c); /* Send the cmd */
27217+ h->access->submit_command(h, c); /* Send the cmd */
27218 do {
27219 complete = pollcomplete(h->ctlr);
27220
27221@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27222 while (!hlist_empty(&h->reqQ)) {
27223 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27224 /* can't do anything if fifo is full */
27225- if ((h->access.fifo_full(h))) {
27226+ if ((h->access->fifo_full(h))) {
27227 printk(KERN_WARNING "cciss: fifo full\n");
27228 break;
27229 }
27230@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27231 h->Qdepth--;
27232
27233 /* Tell the controller execute command */
27234- h->access.submit_command(h, c);
27235+ h->access->submit_command(h, c);
27236
27237 /* Put job onto the completed Q */
27238 addQ(&h->cmpQ, c);
27239@@ -3393,17 +3395,17 @@ startio:
27240
27241 static inline unsigned long get_next_completion(ctlr_info_t *h)
27242 {
27243- return h->access.command_completed(h);
27244+ return h->access->command_completed(h);
27245 }
27246
27247 static inline int interrupt_pending(ctlr_info_t *h)
27248 {
27249- return h->access.intr_pending(h);
27250+ return h->access->intr_pending(h);
27251 }
27252
27253 static inline long interrupt_not_for_us(ctlr_info_t *h)
27254 {
27255- return (((h->access.intr_pending(h) == 0) ||
27256+ return (((h->access->intr_pending(h) == 0) ||
27257 (h->interrupts_enabled == 0)));
27258 }
27259
27260@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27261 */
27262 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27263 c->product_name = products[prod_index].product_name;
27264- c->access = *(products[prod_index].access);
27265+ c->access = products[prod_index].access;
27266 c->nr_cmds = c->max_commands - 4;
27267 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27268 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27269@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27270 }
27271
27272 /* make sure the board interrupts are off */
27273- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27274+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27275 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27276 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27277 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27278@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27279 cciss_scsi_setup(i);
27280
27281 /* Turn the interrupts on so we can service requests */
27282- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27283+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27284
27285 /* Get the firmware version */
27286 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27287diff -urNp linux-2.6.32.45/drivers/block/cciss.h linux-2.6.32.45/drivers/block/cciss.h
27288--- linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:35:28.000000000 -0400
27289+++ linux-2.6.32.45/drivers/block/cciss.h 2011-08-09 18:33:59.000000000 -0400
27290@@ -90,7 +90,7 @@ struct ctlr_info
27291 // information about each logical volume
27292 drive_info_struct *drv[CISS_MAX_LUN];
27293
27294- struct access_method access;
27295+ struct access_method *access;
27296
27297 /* queue and queue Info */
27298 struct hlist_head reqQ;
27299diff -urNp linux-2.6.32.45/drivers/block/cpqarray.c linux-2.6.32.45/drivers/block/cpqarray.c
27300--- linux-2.6.32.45/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27301+++ linux-2.6.32.45/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27302@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27303 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27304 goto Enomem4;
27305 }
27306- hba[i]->access.set_intr_mask(hba[i], 0);
27307+ hba[i]->access->set_intr_mask(hba[i], 0);
27308 if (request_irq(hba[i]->intr, do_ida_intr,
27309 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27310 {
27311@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27312 add_timer(&hba[i]->timer);
27313
27314 /* Enable IRQ now that spinlock and rate limit timer are set up */
27315- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27316+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27317
27318 for(j=0; j<NWD; j++) {
27319 struct gendisk *disk = ida_gendisk[i][j];
27320@@ -695,7 +695,7 @@ DBGINFO(
27321 for(i=0; i<NR_PRODUCTS; i++) {
27322 if (board_id == products[i].board_id) {
27323 c->product_name = products[i].product_name;
27324- c->access = *(products[i].access);
27325+ c->access = products[i].access;
27326 break;
27327 }
27328 }
27329@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27330 hba[ctlr]->intr = intr;
27331 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27332 hba[ctlr]->product_name = products[j].product_name;
27333- hba[ctlr]->access = *(products[j].access);
27334+ hba[ctlr]->access = products[j].access;
27335 hba[ctlr]->ctlr = ctlr;
27336 hba[ctlr]->board_id = board_id;
27337 hba[ctlr]->pci_dev = NULL; /* not PCI */
27338@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27339 struct scatterlist tmp_sg[SG_MAX];
27340 int i, dir, seg;
27341
27342+ pax_track_stack();
27343+
27344 if (blk_queue_plugged(q))
27345 goto startio;
27346
27347@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27348
27349 while((c = h->reqQ) != NULL) {
27350 /* Can't do anything if we're busy */
27351- if (h->access.fifo_full(h) == 0)
27352+ if (h->access->fifo_full(h) == 0)
27353 return;
27354
27355 /* Get the first entry from the request Q */
27356@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27357 h->Qdepth--;
27358
27359 /* Tell the controller to do our bidding */
27360- h->access.submit_command(h, c);
27361+ h->access->submit_command(h, c);
27362
27363 /* Get onto the completion Q */
27364 addQ(&h->cmpQ, c);
27365@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27366 unsigned long flags;
27367 __u32 a,a1;
27368
27369- istat = h->access.intr_pending(h);
27370+ istat = h->access->intr_pending(h);
27371 /* Is this interrupt for us? */
27372 if (istat == 0)
27373 return IRQ_NONE;
27374@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27375 */
27376 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27377 if (istat & FIFO_NOT_EMPTY) {
27378- while((a = h->access.command_completed(h))) {
27379+ while((a = h->access->command_completed(h))) {
27380 a1 = a; a &= ~3;
27381 if ((c = h->cmpQ) == NULL)
27382 {
27383@@ -1434,11 +1436,11 @@ static int sendcmd(
27384 /*
27385 * Disable interrupt
27386 */
27387- info_p->access.set_intr_mask(info_p, 0);
27388+ info_p->access->set_intr_mask(info_p, 0);
27389 /* Make sure there is room in the command FIFO */
27390 /* Actually it should be completely empty at this time. */
27391 for (i = 200000; i > 0; i--) {
27392- temp = info_p->access.fifo_full(info_p);
27393+ temp = info_p->access->fifo_full(info_p);
27394 if (temp != 0) {
27395 break;
27396 }
27397@@ -1451,7 +1453,7 @@ DBG(
27398 /*
27399 * Send the cmd
27400 */
27401- info_p->access.submit_command(info_p, c);
27402+ info_p->access->submit_command(info_p, c);
27403 complete = pollcomplete(ctlr);
27404
27405 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27406@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27407 * we check the new geometry. Then turn interrupts back on when
27408 * we're done.
27409 */
27410- host->access.set_intr_mask(host, 0);
27411+ host->access->set_intr_mask(host, 0);
27412 getgeometry(ctlr);
27413- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27414+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27415
27416 for(i=0; i<NWD; i++) {
27417 struct gendisk *disk = ida_gendisk[ctlr][i];
27418@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27419 /* Wait (up to 2 seconds) for a command to complete */
27420
27421 for (i = 200000; i > 0; i--) {
27422- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27423+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27424 if (done == 0) {
27425 udelay(10); /* a short fixed delay */
27426 } else
27427diff -urNp linux-2.6.32.45/drivers/block/cpqarray.h linux-2.6.32.45/drivers/block/cpqarray.h
27428--- linux-2.6.32.45/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27429+++ linux-2.6.32.45/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27430@@ -99,7 +99,7 @@ struct ctlr_info {
27431 drv_info_t drv[NWD];
27432 struct proc_dir_entry *proc;
27433
27434- struct access_method access;
27435+ struct access_method *access;
27436
27437 cmdlist_t *reqQ;
27438 cmdlist_t *cmpQ;
27439diff -urNp linux-2.6.32.45/drivers/block/DAC960.c linux-2.6.32.45/drivers/block/DAC960.c
27440--- linux-2.6.32.45/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27441+++ linux-2.6.32.45/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27442@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27443 unsigned long flags;
27444 int Channel, TargetID;
27445
27446+ pax_track_stack();
27447+
27448 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27449 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27450 sizeof(DAC960_SCSI_Inquiry_T) +
27451diff -urNp linux-2.6.32.45/drivers/block/nbd.c linux-2.6.32.45/drivers/block/nbd.c
27452--- linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27453+++ linux-2.6.32.45/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27454@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27455 struct kvec iov;
27456 sigset_t blocked, oldset;
27457
27458+ pax_track_stack();
27459+
27460 if (unlikely(!sock)) {
27461 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27462 lo->disk->disk_name, (send ? "send" : "recv"));
27463@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27464 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27465 unsigned int cmd, unsigned long arg)
27466 {
27467+ pax_track_stack();
27468+
27469 switch (cmd) {
27470 case NBD_DISCONNECT: {
27471 struct request sreq;
27472diff -urNp linux-2.6.32.45/drivers/block/pktcdvd.c linux-2.6.32.45/drivers/block/pktcdvd.c
27473--- linux-2.6.32.45/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27474+++ linux-2.6.32.45/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27475@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27476 return len;
27477 }
27478
27479-static struct sysfs_ops kobj_pkt_ops = {
27480+static const struct sysfs_ops kobj_pkt_ops = {
27481 .show = kobj_pkt_show,
27482 .store = kobj_pkt_store
27483 };
27484diff -urNp linux-2.6.32.45/drivers/char/agp/frontend.c linux-2.6.32.45/drivers/char/agp/frontend.c
27485--- linux-2.6.32.45/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27486+++ linux-2.6.32.45/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27487@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27488 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27489 return -EFAULT;
27490
27491- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27492+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27493 return -EFAULT;
27494
27495 client = agp_find_client_by_pid(reserve.pid);
27496diff -urNp linux-2.6.32.45/drivers/char/briq_panel.c linux-2.6.32.45/drivers/char/briq_panel.c
27497--- linux-2.6.32.45/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27498+++ linux-2.6.32.45/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27499@@ -10,6 +10,7 @@
27500 #include <linux/types.h>
27501 #include <linux/errno.h>
27502 #include <linux/tty.h>
27503+#include <linux/mutex.h>
27504 #include <linux/timer.h>
27505 #include <linux/kernel.h>
27506 #include <linux/wait.h>
27507@@ -36,6 +37,7 @@ static int vfd_is_open;
27508 static unsigned char vfd[40];
27509 static int vfd_cursor;
27510 static unsigned char ledpb, led;
27511+static DEFINE_MUTEX(vfd_mutex);
27512
27513 static void update_vfd(void)
27514 {
27515@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27516 if (!vfd_is_open)
27517 return -EBUSY;
27518
27519+ mutex_lock(&vfd_mutex);
27520 for (;;) {
27521 char c;
27522 if (!indx)
27523 break;
27524- if (get_user(c, buf))
27525+ if (get_user(c, buf)) {
27526+ mutex_unlock(&vfd_mutex);
27527 return -EFAULT;
27528+ }
27529 if (esc) {
27530 set_led(c);
27531 esc = 0;
27532@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27533 buf++;
27534 }
27535 update_vfd();
27536+ mutex_unlock(&vfd_mutex);
27537
27538 return len;
27539 }
27540diff -urNp linux-2.6.32.45/drivers/char/genrtc.c linux-2.6.32.45/drivers/char/genrtc.c
27541--- linux-2.6.32.45/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27542+++ linux-2.6.32.45/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27543@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27544 switch (cmd) {
27545
27546 case RTC_PLL_GET:
27547+ memset(&pll, 0, sizeof(pll));
27548 if (get_rtc_pll(&pll))
27549 return -EINVAL;
27550 else
27551diff -urNp linux-2.6.32.45/drivers/char/hpet.c linux-2.6.32.45/drivers/char/hpet.c
27552--- linux-2.6.32.45/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27553+++ linux-2.6.32.45/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27554@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27555 return 0;
27556 }
27557
27558-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27559+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27560
27561 static int
27562 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27563@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27564 }
27565
27566 static int
27567-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27568+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27569 {
27570 struct hpet_timer __iomem *timer;
27571 struct hpet __iomem *hpet;
27572@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27573 {
27574 struct hpet_info info;
27575
27576+ memset(&info, 0, sizeof(info));
27577+
27578 if (devp->hd_ireqfreq)
27579 info.hi_ireqfreq =
27580 hpet_time_div(hpetp, devp->hd_ireqfreq);
27581- else
27582- info.hi_ireqfreq = 0;
27583 info.hi_flags =
27584 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27585 info.hi_hpet = hpetp->hp_which;
27586diff -urNp linux-2.6.32.45/drivers/char/hvc_beat.c linux-2.6.32.45/drivers/char/hvc_beat.c
27587--- linux-2.6.32.45/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27588+++ linux-2.6.32.45/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27589@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27590 return cnt;
27591 }
27592
27593-static struct hv_ops hvc_beat_get_put_ops = {
27594+static const struct hv_ops hvc_beat_get_put_ops = {
27595 .get_chars = hvc_beat_get_chars,
27596 .put_chars = hvc_beat_put_chars,
27597 };
27598diff -urNp linux-2.6.32.45/drivers/char/hvc_console.c linux-2.6.32.45/drivers/char/hvc_console.c
27599--- linux-2.6.32.45/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27600+++ linux-2.6.32.45/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27601@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27602 * console interfaces but can still be used as a tty device. This has to be
27603 * static because kmalloc will not work during early console init.
27604 */
27605-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27606+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27607 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27608 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27609
27610@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27611 * vty adapters do NOT get an hvc_instantiate() callback since they
27612 * appear after early console init.
27613 */
27614-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27615+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27616 {
27617 struct hvc_struct *hp;
27618
27619@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27620 };
27621
27622 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27623- struct hv_ops *ops, int outbuf_size)
27624+ const struct hv_ops *ops, int outbuf_size)
27625 {
27626 struct hvc_struct *hp;
27627 int i;
27628diff -urNp linux-2.6.32.45/drivers/char/hvc_console.h linux-2.6.32.45/drivers/char/hvc_console.h
27629--- linux-2.6.32.45/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27630+++ linux-2.6.32.45/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27631@@ -55,7 +55,7 @@ struct hvc_struct {
27632 int outbuf_size;
27633 int n_outbuf;
27634 uint32_t vtermno;
27635- struct hv_ops *ops;
27636+ const struct hv_ops *ops;
27637 int irq_requested;
27638 int data;
27639 struct winsize ws;
27640@@ -76,11 +76,11 @@ struct hv_ops {
27641 };
27642
27643 /* Register a vterm and a slot index for use as a console (console_init) */
27644-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27645+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27646
27647 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27648 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27649- struct hv_ops *ops, int outbuf_size);
27650+ const struct hv_ops *ops, int outbuf_size);
27651 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27652 extern int hvc_remove(struct hvc_struct *hp);
27653
27654diff -urNp linux-2.6.32.45/drivers/char/hvc_iseries.c linux-2.6.32.45/drivers/char/hvc_iseries.c
27655--- linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27656+++ linux-2.6.32.45/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27657@@ -197,7 +197,7 @@ done:
27658 return sent;
27659 }
27660
27661-static struct hv_ops hvc_get_put_ops = {
27662+static const struct hv_ops hvc_get_put_ops = {
27663 .get_chars = get_chars,
27664 .put_chars = put_chars,
27665 .notifier_add = notifier_add_irq,
27666diff -urNp linux-2.6.32.45/drivers/char/hvc_iucv.c linux-2.6.32.45/drivers/char/hvc_iucv.c
27667--- linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27668+++ linux-2.6.32.45/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27669@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27670
27671
27672 /* HVC operations */
27673-static struct hv_ops hvc_iucv_ops = {
27674+static const struct hv_ops hvc_iucv_ops = {
27675 .get_chars = hvc_iucv_get_chars,
27676 .put_chars = hvc_iucv_put_chars,
27677 .notifier_add = hvc_iucv_notifier_add,
27678diff -urNp linux-2.6.32.45/drivers/char/hvc_rtas.c linux-2.6.32.45/drivers/char/hvc_rtas.c
27679--- linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27680+++ linux-2.6.32.45/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27681@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27682 return i;
27683 }
27684
27685-static struct hv_ops hvc_rtas_get_put_ops = {
27686+static const struct hv_ops hvc_rtas_get_put_ops = {
27687 .get_chars = hvc_rtas_read_console,
27688 .put_chars = hvc_rtas_write_console,
27689 };
27690diff -urNp linux-2.6.32.45/drivers/char/hvcs.c linux-2.6.32.45/drivers/char/hvcs.c
27691--- linux-2.6.32.45/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27692+++ linux-2.6.32.45/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27693@@ -82,6 +82,7 @@
27694 #include <asm/hvcserver.h>
27695 #include <asm/uaccess.h>
27696 #include <asm/vio.h>
27697+#include <asm/local.h>
27698
27699 /*
27700 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27701@@ -269,7 +270,7 @@ struct hvcs_struct {
27702 unsigned int index;
27703
27704 struct tty_struct *tty;
27705- int open_count;
27706+ local_t open_count;
27707
27708 /*
27709 * Used to tell the driver kernel_thread what operations need to take
27710@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27711
27712 spin_lock_irqsave(&hvcsd->lock, flags);
27713
27714- if (hvcsd->open_count > 0) {
27715+ if (local_read(&hvcsd->open_count) > 0) {
27716 spin_unlock_irqrestore(&hvcsd->lock, flags);
27717 printk(KERN_INFO "HVCS: vterm state unchanged. "
27718 "The hvcs device node is still in use.\n");
27719@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27720 if ((retval = hvcs_partner_connect(hvcsd)))
27721 goto error_release;
27722
27723- hvcsd->open_count = 1;
27724+ local_set(&hvcsd->open_count, 1);
27725 hvcsd->tty = tty;
27726 tty->driver_data = hvcsd;
27727
27728@@ -1169,7 +1170,7 @@ fast_open:
27729
27730 spin_lock_irqsave(&hvcsd->lock, flags);
27731 kref_get(&hvcsd->kref);
27732- hvcsd->open_count++;
27733+ local_inc(&hvcsd->open_count);
27734 hvcsd->todo_mask |= HVCS_SCHED_READ;
27735 spin_unlock_irqrestore(&hvcsd->lock, flags);
27736
27737@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27738 hvcsd = tty->driver_data;
27739
27740 spin_lock_irqsave(&hvcsd->lock, flags);
27741- if (--hvcsd->open_count == 0) {
27742+ if (local_dec_and_test(&hvcsd->open_count)) {
27743
27744 vio_disable_interrupts(hvcsd->vdev);
27745
27746@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27747 free_irq(irq, hvcsd);
27748 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27749 return;
27750- } else if (hvcsd->open_count < 0) {
27751+ } else if (local_read(&hvcsd->open_count) < 0) {
27752 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27753 " is missmanaged.\n",
27754- hvcsd->vdev->unit_address, hvcsd->open_count);
27755+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27756 }
27757
27758 spin_unlock_irqrestore(&hvcsd->lock, flags);
27759@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27760
27761 spin_lock_irqsave(&hvcsd->lock, flags);
27762 /* Preserve this so that we know how many kref refs to put */
27763- temp_open_count = hvcsd->open_count;
27764+ temp_open_count = local_read(&hvcsd->open_count);
27765
27766 /*
27767 * Don't kref put inside the spinlock because the destruction
27768@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27769 hvcsd->tty->driver_data = NULL;
27770 hvcsd->tty = NULL;
27771
27772- hvcsd->open_count = 0;
27773+ local_set(&hvcsd->open_count, 0);
27774
27775 /* This will drop any buffered data on the floor which is OK in a hangup
27776 * scenario. */
27777@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27778 * the middle of a write operation? This is a crummy place to do this
27779 * but we want to keep it all in the spinlock.
27780 */
27781- if (hvcsd->open_count <= 0) {
27782+ if (local_read(&hvcsd->open_count) <= 0) {
27783 spin_unlock_irqrestore(&hvcsd->lock, flags);
27784 return -ENODEV;
27785 }
27786@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27787 {
27788 struct hvcs_struct *hvcsd = tty->driver_data;
27789
27790- if (!hvcsd || hvcsd->open_count <= 0)
27791+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27792 return 0;
27793
27794 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27795diff -urNp linux-2.6.32.45/drivers/char/hvc_udbg.c linux-2.6.32.45/drivers/char/hvc_udbg.c
27796--- linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27797+++ linux-2.6.32.45/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27798@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27799 return i;
27800 }
27801
27802-static struct hv_ops hvc_udbg_ops = {
27803+static const struct hv_ops hvc_udbg_ops = {
27804 .get_chars = hvc_udbg_get,
27805 .put_chars = hvc_udbg_put,
27806 };
27807diff -urNp linux-2.6.32.45/drivers/char/hvc_vio.c linux-2.6.32.45/drivers/char/hvc_vio.c
27808--- linux-2.6.32.45/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27809+++ linux-2.6.32.45/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27810@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27811 return got;
27812 }
27813
27814-static struct hv_ops hvc_get_put_ops = {
27815+static const struct hv_ops hvc_get_put_ops = {
27816 .get_chars = filtered_get_chars,
27817 .put_chars = hvc_put_chars,
27818 .notifier_add = notifier_add_irq,
27819diff -urNp linux-2.6.32.45/drivers/char/hvc_xen.c linux-2.6.32.45/drivers/char/hvc_xen.c
27820--- linux-2.6.32.45/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27821+++ linux-2.6.32.45/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27822@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27823 return recv;
27824 }
27825
27826-static struct hv_ops hvc_ops = {
27827+static const struct hv_ops hvc_ops = {
27828 .get_chars = read_console,
27829 .put_chars = write_console,
27830 .notifier_add = notifier_add_irq,
27831diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c
27832--- linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27833+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27834@@ -414,7 +414,7 @@ struct ipmi_smi {
27835 struct proc_dir_entry *proc_dir;
27836 char proc_dir_name[10];
27837
27838- atomic_t stats[IPMI_NUM_STATS];
27839+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27840
27841 /*
27842 * run_to_completion duplicate of smb_info, smi_info
27843@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27844
27845
27846 #define ipmi_inc_stat(intf, stat) \
27847- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27848+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27849 #define ipmi_get_stat(intf, stat) \
27850- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27851+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27852
27853 static int is_lan_addr(struct ipmi_addr *addr)
27854 {
27855@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27856 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27857 init_waitqueue_head(&intf->waitq);
27858 for (i = 0; i < IPMI_NUM_STATS; i++)
27859- atomic_set(&intf->stats[i], 0);
27860+ atomic_set_unchecked(&intf->stats[i], 0);
27861
27862 intf->proc_dir = NULL;
27863
27864@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27865 struct ipmi_smi_msg smi_msg;
27866 struct ipmi_recv_msg recv_msg;
27867
27868+ pax_track_stack();
27869+
27870 si = (struct ipmi_system_interface_addr *) &addr;
27871 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27872 si->channel = IPMI_BMC_CHANNEL;
27873diff -urNp linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c
27874--- linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27875+++ linux-2.6.32.45/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27876@@ -277,7 +277,7 @@ struct smi_info {
27877 unsigned char slave_addr;
27878
27879 /* Counters and things for the proc filesystem. */
27880- atomic_t stats[SI_NUM_STATS];
27881+ atomic_unchecked_t stats[SI_NUM_STATS];
27882
27883 struct task_struct *thread;
27884
27885@@ -285,9 +285,9 @@ struct smi_info {
27886 };
27887
27888 #define smi_inc_stat(smi, stat) \
27889- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27890+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27891 #define smi_get_stat(smi, stat) \
27892- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27893+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27894
27895 #define SI_MAX_PARMS 4
27896
27897@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27898 atomic_set(&new_smi->req_events, 0);
27899 new_smi->run_to_completion = 0;
27900 for (i = 0; i < SI_NUM_STATS; i++)
27901- atomic_set(&new_smi->stats[i], 0);
27902+ atomic_set_unchecked(&new_smi->stats[i], 0);
27903
27904 new_smi->interrupt_disabled = 0;
27905 atomic_set(&new_smi->stop_operation, 0);
27906diff -urNp linux-2.6.32.45/drivers/char/istallion.c linux-2.6.32.45/drivers/char/istallion.c
27907--- linux-2.6.32.45/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27908+++ linux-2.6.32.45/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27909@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27910 * re-used for each stats call.
27911 */
27912 static comstats_t stli_comstats;
27913-static combrd_t stli_brdstats;
27914 static struct asystats stli_cdkstats;
27915
27916 /*****************************************************************************/
27917@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27918 {
27919 struct stlibrd *brdp;
27920 unsigned int i;
27921+ combrd_t stli_brdstats;
27922
27923 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27924 return -EFAULT;
27925@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27926 struct stliport stli_dummyport;
27927 struct stliport *portp;
27928
27929+ pax_track_stack();
27930+
27931 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27932 return -EFAULT;
27933 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27934@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27935 struct stlibrd stli_dummybrd;
27936 struct stlibrd *brdp;
27937
27938+ pax_track_stack();
27939+
27940 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27941 return -EFAULT;
27942 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27943diff -urNp linux-2.6.32.45/drivers/char/Kconfig linux-2.6.32.45/drivers/char/Kconfig
27944--- linux-2.6.32.45/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27945+++ linux-2.6.32.45/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27946@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27947
27948 config DEVKMEM
27949 bool "/dev/kmem virtual device support"
27950- default y
27951+ default n
27952+ depends on !GRKERNSEC_KMEM
27953 help
27954 Say Y here if you want to support the /dev/kmem device. The
27955 /dev/kmem device is rarely used, but can be used for certain
27956@@ -1114,6 +1115,7 @@ config DEVPORT
27957 bool
27958 depends on !M68K
27959 depends on ISA || PCI
27960+ depends on !GRKERNSEC_KMEM
27961 default y
27962
27963 source "drivers/s390/char/Kconfig"
27964diff -urNp linux-2.6.32.45/drivers/char/keyboard.c linux-2.6.32.45/drivers/char/keyboard.c
27965--- linux-2.6.32.45/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27966+++ linux-2.6.32.45/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27967@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27968 kbd->kbdmode == VC_MEDIUMRAW) &&
27969 value != KVAL(K_SAK))
27970 return; /* SAK is allowed even in raw mode */
27971+
27972+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27973+ {
27974+ void *func = fn_handler[value];
27975+ if (func == fn_show_state || func == fn_show_ptregs ||
27976+ func == fn_show_mem)
27977+ return;
27978+ }
27979+#endif
27980+
27981 fn_handler[value](vc);
27982 }
27983
27984@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27985 .evbit = { BIT_MASK(EV_SND) },
27986 },
27987
27988- { }, /* Terminating entry */
27989+ { 0 }, /* Terminating entry */
27990 };
27991
27992 MODULE_DEVICE_TABLE(input, kbd_ids);
27993diff -urNp linux-2.6.32.45/drivers/char/mem.c linux-2.6.32.45/drivers/char/mem.c
27994--- linux-2.6.32.45/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27995+++ linux-2.6.32.45/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27996@@ -18,6 +18,7 @@
27997 #include <linux/raw.h>
27998 #include <linux/tty.h>
27999 #include <linux/capability.h>
28000+#include <linux/security.h>
28001 #include <linux/ptrace.h>
28002 #include <linux/device.h>
28003 #include <linux/highmem.h>
28004@@ -35,6 +36,10 @@
28005 # include <linux/efi.h>
28006 #endif
28007
28008+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28009+extern struct file_operations grsec_fops;
28010+#endif
28011+
28012 static inline unsigned long size_inside_page(unsigned long start,
28013 unsigned long size)
28014 {
28015@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
28016
28017 while (cursor < to) {
28018 if (!devmem_is_allowed(pfn)) {
28019+#ifdef CONFIG_GRKERNSEC_KMEM
28020+ gr_handle_mem_readwrite(from, to);
28021+#else
28022 printk(KERN_INFO
28023 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28024 current->comm, from, to);
28025+#endif
28026 return 0;
28027 }
28028 cursor += PAGE_SIZE;
28029@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28030 }
28031 return 1;
28032 }
28033+#elif defined(CONFIG_GRKERNSEC_KMEM)
28034+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28035+{
28036+ return 0;
28037+}
28038 #else
28039 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28040 {
28041@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28042 #endif
28043
28044 while (count > 0) {
28045+ char *temp;
28046+
28047 /*
28048 * Handle first page in case it's not aligned
28049 */
28050@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28051 if (!ptr)
28052 return -EFAULT;
28053
28054- if (copy_to_user(buf, ptr, sz)) {
28055+#ifdef CONFIG_PAX_USERCOPY
28056+ temp = kmalloc(sz, GFP_KERNEL);
28057+ if (!temp) {
28058+ unxlate_dev_mem_ptr(p, ptr);
28059+ return -ENOMEM;
28060+ }
28061+ memcpy(temp, ptr, sz);
28062+#else
28063+ temp = ptr;
28064+#endif
28065+
28066+ if (copy_to_user(buf, temp, sz)) {
28067+
28068+#ifdef CONFIG_PAX_USERCOPY
28069+ kfree(temp);
28070+#endif
28071+
28072 unxlate_dev_mem_ptr(p, ptr);
28073 return -EFAULT;
28074 }
28075
28076+#ifdef CONFIG_PAX_USERCOPY
28077+ kfree(temp);
28078+#endif
28079+
28080 unxlate_dev_mem_ptr(p, ptr);
28081
28082 buf += sz;
28083@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28084 size_t count, loff_t *ppos)
28085 {
28086 unsigned long p = *ppos;
28087- ssize_t low_count, read, sz;
28088+ ssize_t low_count, read, sz, err = 0;
28089 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28090- int err = 0;
28091
28092 read = 0;
28093 if (p < (unsigned long) high_memory) {
28094@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28095 }
28096 #endif
28097 while (low_count > 0) {
28098+ char *temp;
28099+
28100 sz = size_inside_page(p, low_count);
28101
28102 /*
28103@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28104 */
28105 kbuf = xlate_dev_kmem_ptr((char *)p);
28106
28107- if (copy_to_user(buf, kbuf, sz))
28108+#ifdef CONFIG_PAX_USERCOPY
28109+ temp = kmalloc(sz, GFP_KERNEL);
28110+ if (!temp)
28111+ return -ENOMEM;
28112+ memcpy(temp, kbuf, sz);
28113+#else
28114+ temp = kbuf;
28115+#endif
28116+
28117+ err = copy_to_user(buf, temp, sz);
28118+
28119+#ifdef CONFIG_PAX_USERCOPY
28120+ kfree(temp);
28121+#endif
28122+
28123+ if (err)
28124 return -EFAULT;
28125 buf += sz;
28126 p += sz;
28127@@ -889,6 +941,9 @@ static const struct memdev {
28128 #ifdef CONFIG_CRASH_DUMP
28129 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28130 #endif
28131+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28132+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28133+#endif
28134 };
28135
28136 static int memory_open(struct inode *inode, struct file *filp)
28137diff -urNp linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c
28138--- linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28139+++ linux-2.6.32.45/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28140@@ -29,6 +29,7 @@
28141 #include <linux/tty_driver.h>
28142 #include <linux/tty_flip.h>
28143 #include <linux/uaccess.h>
28144+#include <asm/local.h>
28145
28146 #include "tty.h"
28147 #include "network.h"
28148@@ -51,7 +52,7 @@ struct ipw_tty {
28149 int tty_type;
28150 struct ipw_network *network;
28151 struct tty_struct *linux_tty;
28152- int open_count;
28153+ local_t open_count;
28154 unsigned int control_lines;
28155 struct mutex ipw_tty_mutex;
28156 int tx_bytes_queued;
28157@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28158 mutex_unlock(&tty->ipw_tty_mutex);
28159 return -ENODEV;
28160 }
28161- if (tty->open_count == 0)
28162+ if (local_read(&tty->open_count) == 0)
28163 tty->tx_bytes_queued = 0;
28164
28165- tty->open_count++;
28166+ local_inc(&tty->open_count);
28167
28168 tty->linux_tty = linux_tty;
28169 linux_tty->driver_data = tty;
28170@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28171
28172 static void do_ipw_close(struct ipw_tty *tty)
28173 {
28174- tty->open_count--;
28175-
28176- if (tty->open_count == 0) {
28177+ if (local_dec_return(&tty->open_count) == 0) {
28178 struct tty_struct *linux_tty = tty->linux_tty;
28179
28180 if (linux_tty != NULL) {
28181@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28182 return;
28183
28184 mutex_lock(&tty->ipw_tty_mutex);
28185- if (tty->open_count == 0) {
28186+ if (local_read(&tty->open_count) == 0) {
28187 mutex_unlock(&tty->ipw_tty_mutex);
28188 return;
28189 }
28190@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28191 return;
28192 }
28193
28194- if (!tty->open_count) {
28195+ if (!local_read(&tty->open_count)) {
28196 mutex_unlock(&tty->ipw_tty_mutex);
28197 return;
28198 }
28199@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28200 return -ENODEV;
28201
28202 mutex_lock(&tty->ipw_tty_mutex);
28203- if (!tty->open_count) {
28204+ if (!local_read(&tty->open_count)) {
28205 mutex_unlock(&tty->ipw_tty_mutex);
28206 return -EINVAL;
28207 }
28208@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28209 if (!tty)
28210 return -ENODEV;
28211
28212- if (!tty->open_count)
28213+ if (!local_read(&tty->open_count))
28214 return -EINVAL;
28215
28216 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28217@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28218 if (!tty)
28219 return 0;
28220
28221- if (!tty->open_count)
28222+ if (!local_read(&tty->open_count))
28223 return 0;
28224
28225 return tty->tx_bytes_queued;
28226@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28227 if (!tty)
28228 return -ENODEV;
28229
28230- if (!tty->open_count)
28231+ if (!local_read(&tty->open_count))
28232 return -EINVAL;
28233
28234 return get_control_lines(tty);
28235@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28236 if (!tty)
28237 return -ENODEV;
28238
28239- if (!tty->open_count)
28240+ if (!local_read(&tty->open_count))
28241 return -EINVAL;
28242
28243 return set_control_lines(tty, set, clear);
28244@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28245 if (!tty)
28246 return -ENODEV;
28247
28248- if (!tty->open_count)
28249+ if (!local_read(&tty->open_count))
28250 return -EINVAL;
28251
28252 /* FIXME: Exactly how is the tty object locked here .. */
28253@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28254 against a parallel ioctl etc */
28255 mutex_lock(&ttyj->ipw_tty_mutex);
28256 }
28257- while (ttyj->open_count)
28258+ while (local_read(&ttyj->open_count))
28259 do_ipw_close(ttyj);
28260 ipwireless_disassociate_network_ttys(network,
28261 ttyj->channel_idx);
28262diff -urNp linux-2.6.32.45/drivers/char/pty.c linux-2.6.32.45/drivers/char/pty.c
28263--- linux-2.6.32.45/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28264+++ linux-2.6.32.45/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28265@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28266 register_sysctl_table(pty_root_table);
28267
28268 /* Now create the /dev/ptmx special device */
28269+ pax_open_kernel();
28270 tty_default_fops(&ptmx_fops);
28271- ptmx_fops.open = ptmx_open;
28272+ *(void **)&ptmx_fops.open = ptmx_open;
28273+ pax_close_kernel();
28274
28275 cdev_init(&ptmx_cdev, &ptmx_fops);
28276 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28277diff -urNp linux-2.6.32.45/drivers/char/random.c linux-2.6.32.45/drivers/char/random.c
28278--- linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:37:25.000000000 -0400
28279+++ linux-2.6.32.45/drivers/char/random.c 2011-08-16 20:43:23.000000000 -0400
28280@@ -254,8 +254,13 @@
28281 /*
28282 * Configuration information
28283 */
28284+#ifdef CONFIG_GRKERNSEC_RANDNET
28285+#define INPUT_POOL_WORDS 512
28286+#define OUTPUT_POOL_WORDS 128
28287+#else
28288 #define INPUT_POOL_WORDS 128
28289 #define OUTPUT_POOL_WORDS 32
28290+#endif
28291 #define SEC_XFER_SIZE 512
28292
28293 /*
28294@@ -292,10 +297,17 @@ static struct poolinfo {
28295 int poolwords;
28296 int tap1, tap2, tap3, tap4, tap5;
28297 } poolinfo_table[] = {
28298+#ifdef CONFIG_GRKERNSEC_RANDNET
28299+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28300+ { 512, 411, 308, 208, 104, 1 },
28301+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28302+ { 128, 103, 76, 51, 25, 1 },
28303+#else
28304 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28305 { 128, 103, 76, 51, 25, 1 },
28306 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28307 { 32, 26, 20, 14, 7, 1 },
28308+#endif
28309 #if 0
28310 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28311 { 2048, 1638, 1231, 819, 411, 1 },
28312@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28313 #include <linux/sysctl.h>
28314
28315 static int min_read_thresh = 8, min_write_thresh;
28316-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28317+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28318 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28319 static char sysctl_bootid[16];
28320
28321diff -urNp linux-2.6.32.45/drivers/char/rocket.c linux-2.6.32.45/drivers/char/rocket.c
28322--- linux-2.6.32.45/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28323+++ linux-2.6.32.45/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28324@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28325 struct rocket_ports tmp;
28326 int board;
28327
28328+ pax_track_stack();
28329+
28330 if (!retports)
28331 return -EFAULT;
28332 memset(&tmp, 0, sizeof (tmp));
28333diff -urNp linux-2.6.32.45/drivers/char/sonypi.c linux-2.6.32.45/drivers/char/sonypi.c
28334--- linux-2.6.32.45/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28335+++ linux-2.6.32.45/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28336@@ -55,6 +55,7 @@
28337 #include <asm/uaccess.h>
28338 #include <asm/io.h>
28339 #include <asm/system.h>
28340+#include <asm/local.h>
28341
28342 #include <linux/sonypi.h>
28343
28344@@ -491,7 +492,7 @@ static struct sonypi_device {
28345 spinlock_t fifo_lock;
28346 wait_queue_head_t fifo_proc_list;
28347 struct fasync_struct *fifo_async;
28348- int open_count;
28349+ local_t open_count;
28350 int model;
28351 struct input_dev *input_jog_dev;
28352 struct input_dev *input_key_dev;
28353@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28354 static int sonypi_misc_release(struct inode *inode, struct file *file)
28355 {
28356 mutex_lock(&sonypi_device.lock);
28357- sonypi_device.open_count--;
28358+ local_dec(&sonypi_device.open_count);
28359 mutex_unlock(&sonypi_device.lock);
28360 return 0;
28361 }
28362@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28363 lock_kernel();
28364 mutex_lock(&sonypi_device.lock);
28365 /* Flush input queue on first open */
28366- if (!sonypi_device.open_count)
28367+ if (!local_read(&sonypi_device.open_count))
28368 kfifo_reset(sonypi_device.fifo);
28369- sonypi_device.open_count++;
28370+ local_inc(&sonypi_device.open_count);
28371 mutex_unlock(&sonypi_device.lock);
28372 unlock_kernel();
28373 return 0;
28374diff -urNp linux-2.6.32.45/drivers/char/stallion.c linux-2.6.32.45/drivers/char/stallion.c
28375--- linux-2.6.32.45/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28376+++ linux-2.6.32.45/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28377@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28378 struct stlport stl_dummyport;
28379 struct stlport *portp;
28380
28381+ pax_track_stack();
28382+
28383 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28384 return -EFAULT;
28385 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28386diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm_bios.c linux-2.6.32.45/drivers/char/tpm/tpm_bios.c
28387--- linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28388+++ linux-2.6.32.45/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28389@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28390 event = addr;
28391
28392 if ((event->event_type == 0 && event->event_size == 0) ||
28393- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28394+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28395 return NULL;
28396
28397 return addr;
28398@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28399 return NULL;
28400
28401 if ((event->event_type == 0 && event->event_size == 0) ||
28402- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28403+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28404 return NULL;
28405
28406 (*pos)++;
28407@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28408 int i;
28409
28410 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28411- seq_putc(m, data[i]);
28412+ if (!seq_putc(m, data[i]))
28413+ return -EFAULT;
28414
28415 return 0;
28416 }
28417@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28418 log->bios_event_log_end = log->bios_event_log + len;
28419
28420 virt = acpi_os_map_memory(start, len);
28421+ if (!virt) {
28422+ kfree(log->bios_event_log);
28423+ log->bios_event_log = NULL;
28424+ return -EFAULT;
28425+ }
28426
28427 memcpy(log->bios_event_log, virt, len);
28428
28429diff -urNp linux-2.6.32.45/drivers/char/tpm/tpm.c linux-2.6.32.45/drivers/char/tpm/tpm.c
28430--- linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28431+++ linux-2.6.32.45/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28432@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28433 chip->vendor.req_complete_val)
28434 goto out_recv;
28435
28436- if ((status == chip->vendor.req_canceled)) {
28437+ if (status == chip->vendor.req_canceled) {
28438 dev_err(chip->dev, "Operation Canceled\n");
28439 rc = -ECANCELED;
28440 goto out;
28441@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28442
28443 struct tpm_chip *chip = dev_get_drvdata(dev);
28444
28445+ pax_track_stack();
28446+
28447 tpm_cmd.header.in = tpm_readpubek_header;
28448 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28449 "attempting to read the PUBEK");
28450diff -urNp linux-2.6.32.45/drivers/char/tty_io.c linux-2.6.32.45/drivers/char/tty_io.c
28451--- linux-2.6.32.45/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28452+++ linux-2.6.32.45/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28453@@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28454 return retval;
28455 }
28456
28457+EXPORT_SYMBOL(tty_ioctl);
28458+
28459 #ifdef CONFIG_COMPAT
28460-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28461+long tty_compat_ioctl(struct file *file, unsigned int cmd,
28462 unsigned long arg)
28463 {
28464 struct inode *inode = file->f_dentry->d_inode;
28465@@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28466
28467 return retval;
28468 }
28469+
28470+EXPORT_SYMBOL(tty_compat_ioctl);
28471 #endif
28472
28473 /*
28474@@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28475
28476 void tty_default_fops(struct file_operations *fops)
28477 {
28478- *fops = tty_fops;
28479+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28480 }
28481
28482 /*
28483diff -urNp linux-2.6.32.45/drivers/char/tty_ldisc.c linux-2.6.32.45/drivers/char/tty_ldisc.c
28484--- linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28485+++ linux-2.6.32.45/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28486@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28487 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28488 struct tty_ldisc_ops *ldo = ld->ops;
28489
28490- ldo->refcount--;
28491+ atomic_dec(&ldo->refcount);
28492 module_put(ldo->owner);
28493 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28494
28495@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28496 spin_lock_irqsave(&tty_ldisc_lock, flags);
28497 tty_ldiscs[disc] = new_ldisc;
28498 new_ldisc->num = disc;
28499- new_ldisc->refcount = 0;
28500+ atomic_set(&new_ldisc->refcount, 0);
28501 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28502
28503 return ret;
28504@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28505 return -EINVAL;
28506
28507 spin_lock_irqsave(&tty_ldisc_lock, flags);
28508- if (tty_ldiscs[disc]->refcount)
28509+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28510 ret = -EBUSY;
28511 else
28512 tty_ldiscs[disc] = NULL;
28513@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28514 if (ldops) {
28515 ret = ERR_PTR(-EAGAIN);
28516 if (try_module_get(ldops->owner)) {
28517- ldops->refcount++;
28518+ atomic_inc(&ldops->refcount);
28519 ret = ldops;
28520 }
28521 }
28522@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28523 unsigned long flags;
28524
28525 spin_lock_irqsave(&tty_ldisc_lock, flags);
28526- ldops->refcount--;
28527+ atomic_dec(&ldops->refcount);
28528 module_put(ldops->owner);
28529 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28530 }
28531diff -urNp linux-2.6.32.45/drivers/char/virtio_console.c linux-2.6.32.45/drivers/char/virtio_console.c
28532--- linux-2.6.32.45/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28533+++ linux-2.6.32.45/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28534@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28535 * virtqueue, so we let the drivers do some boutique early-output thing. */
28536 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28537 {
28538- virtio_cons.put_chars = put_chars;
28539+ pax_open_kernel();
28540+ *(void **)&virtio_cons.put_chars = put_chars;
28541+ pax_close_kernel();
28542 return hvc_instantiate(0, 0, &virtio_cons);
28543 }
28544
28545@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28546 out_vq = vqs[1];
28547
28548 /* Start using the new console output. */
28549- virtio_cons.get_chars = get_chars;
28550- virtio_cons.put_chars = put_chars;
28551- virtio_cons.notifier_add = notifier_add_vio;
28552- virtio_cons.notifier_del = notifier_del_vio;
28553- virtio_cons.notifier_hangup = notifier_del_vio;
28554+ pax_open_kernel();
28555+ *(void **)&virtio_cons.get_chars = get_chars;
28556+ *(void **)&virtio_cons.put_chars = put_chars;
28557+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28558+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28559+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28560+ pax_close_kernel();
28561
28562 /* The first argument of hvc_alloc() is the virtual console number, so
28563 * we use zero. The second argument is the parameter for the
28564diff -urNp linux-2.6.32.45/drivers/char/vt.c linux-2.6.32.45/drivers/char/vt.c
28565--- linux-2.6.32.45/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28566+++ linux-2.6.32.45/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28567@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28568
28569 static void notify_write(struct vc_data *vc, unsigned int unicode)
28570 {
28571- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28572+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28573 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28574 }
28575
28576diff -urNp linux-2.6.32.45/drivers/char/vt_ioctl.c linux-2.6.32.45/drivers/char/vt_ioctl.c
28577--- linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28578+++ linux-2.6.32.45/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28579@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28580 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28581 return -EFAULT;
28582
28583- if (!capable(CAP_SYS_TTY_CONFIG))
28584- perm = 0;
28585-
28586 switch (cmd) {
28587 case KDGKBENT:
28588 key_map = key_maps[s];
28589@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28590 val = (i ? K_HOLE : K_NOSUCHMAP);
28591 return put_user(val, &user_kbe->kb_value);
28592 case KDSKBENT:
28593+ if (!capable(CAP_SYS_TTY_CONFIG))
28594+ perm = 0;
28595+
28596 if (!perm)
28597 return -EPERM;
28598+
28599 if (!i && v == K_NOSUCHMAP) {
28600 /* deallocate map */
28601 key_map = key_maps[s];
28602@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28603 int i, j, k;
28604 int ret;
28605
28606- if (!capable(CAP_SYS_TTY_CONFIG))
28607- perm = 0;
28608-
28609 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28610 if (!kbs) {
28611 ret = -ENOMEM;
28612@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28613 kfree(kbs);
28614 return ((p && *p) ? -EOVERFLOW : 0);
28615 case KDSKBSENT:
28616+ if (!capable(CAP_SYS_TTY_CONFIG))
28617+ perm = 0;
28618+
28619 if (!perm) {
28620 ret = -EPERM;
28621 goto reterr;
28622diff -urNp linux-2.6.32.45/drivers/cpufreq/cpufreq.c linux-2.6.32.45/drivers/cpufreq/cpufreq.c
28623--- linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28624+++ linux-2.6.32.45/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28625@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28626 complete(&policy->kobj_unregister);
28627 }
28628
28629-static struct sysfs_ops sysfs_ops = {
28630+static const struct sysfs_ops sysfs_ops = {
28631 .show = show,
28632 .store = store,
28633 };
28634diff -urNp linux-2.6.32.45/drivers/cpuidle/sysfs.c linux-2.6.32.45/drivers/cpuidle/sysfs.c
28635--- linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28636+++ linux-2.6.32.45/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28637@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28638 return ret;
28639 }
28640
28641-static struct sysfs_ops cpuidle_sysfs_ops = {
28642+static const struct sysfs_ops cpuidle_sysfs_ops = {
28643 .show = cpuidle_show,
28644 .store = cpuidle_store,
28645 };
28646@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28647 return ret;
28648 }
28649
28650-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28651+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28652 .show = cpuidle_state_show,
28653 };
28654
28655@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28656 .release = cpuidle_state_sysfs_release,
28657 };
28658
28659-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28660+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28661 {
28662 kobject_put(&device->kobjs[i]->kobj);
28663 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28664diff -urNp linux-2.6.32.45/drivers/crypto/hifn_795x.c linux-2.6.32.45/drivers/crypto/hifn_795x.c
28665--- linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28666+++ linux-2.6.32.45/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28667@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28668 0xCA, 0x34, 0x2B, 0x2E};
28669 struct scatterlist sg;
28670
28671+ pax_track_stack();
28672+
28673 memset(src, 0, sizeof(src));
28674 memset(ctx.key, 0, sizeof(ctx.key));
28675
28676diff -urNp linux-2.6.32.45/drivers/crypto/padlock-aes.c linux-2.6.32.45/drivers/crypto/padlock-aes.c
28677--- linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28678+++ linux-2.6.32.45/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28679@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28680 struct crypto_aes_ctx gen_aes;
28681 int cpu;
28682
28683+ pax_track_stack();
28684+
28685 if (key_len % 8) {
28686 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28687 return -EINVAL;
28688diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.c linux-2.6.32.45/drivers/dma/ioat/dma.c
28689--- linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28690+++ linux-2.6.32.45/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28691@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28692 return entry->show(&chan->common, page);
28693 }
28694
28695-struct sysfs_ops ioat_sysfs_ops = {
28696+const struct sysfs_ops ioat_sysfs_ops = {
28697 .show = ioat_attr_show,
28698 };
28699
28700diff -urNp linux-2.6.32.45/drivers/dma/ioat/dma.h linux-2.6.32.45/drivers/dma/ioat/dma.h
28701--- linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28702+++ linux-2.6.32.45/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28703@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28704 unsigned long *phys_complete);
28705 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28706 void ioat_kobject_del(struct ioatdma_device *device);
28707-extern struct sysfs_ops ioat_sysfs_ops;
28708+extern const struct sysfs_ops ioat_sysfs_ops;
28709 extern struct ioat_sysfs_entry ioat_version_attr;
28710 extern struct ioat_sysfs_entry ioat_cap_attr;
28711 #endif /* IOATDMA_H */
28712diff -urNp linux-2.6.32.45/drivers/edac/edac_device_sysfs.c linux-2.6.32.45/drivers/edac/edac_device_sysfs.c
28713--- linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28714+++ linux-2.6.32.45/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28715@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28716 }
28717
28718 /* edac_dev file operations for an 'ctl_info' */
28719-static struct sysfs_ops device_ctl_info_ops = {
28720+static const struct sysfs_ops device_ctl_info_ops = {
28721 .show = edac_dev_ctl_info_show,
28722 .store = edac_dev_ctl_info_store
28723 };
28724@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28725 }
28726
28727 /* edac_dev file operations for an 'instance' */
28728-static struct sysfs_ops device_instance_ops = {
28729+static const struct sysfs_ops device_instance_ops = {
28730 .show = edac_dev_instance_show,
28731 .store = edac_dev_instance_store
28732 };
28733@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28734 }
28735
28736 /* edac_dev file operations for a 'block' */
28737-static struct sysfs_ops device_block_ops = {
28738+static const struct sysfs_ops device_block_ops = {
28739 .show = edac_dev_block_show,
28740 .store = edac_dev_block_store
28741 };
28742diff -urNp linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c
28743--- linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28744+++ linux-2.6.32.45/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28745@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28746 return -EIO;
28747 }
28748
28749-static struct sysfs_ops csrowfs_ops = {
28750+static const struct sysfs_ops csrowfs_ops = {
28751 .show = csrowdev_show,
28752 .store = csrowdev_store
28753 };
28754@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28755 }
28756
28757 /* Intermediate show/store table */
28758-static struct sysfs_ops mci_ops = {
28759+static const struct sysfs_ops mci_ops = {
28760 .show = mcidev_show,
28761 .store = mcidev_store
28762 };
28763diff -urNp linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c
28764--- linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28765+++ linux-2.6.32.45/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28766@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28767 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28768 static int edac_pci_poll_msec = 1000; /* one second workq period */
28769
28770-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28771-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28772+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28773+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28774
28775 static struct kobject *edac_pci_top_main_kobj;
28776 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28777@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28778 }
28779
28780 /* fs_ops table */
28781-static struct sysfs_ops pci_instance_ops = {
28782+static const struct sysfs_ops pci_instance_ops = {
28783 .show = edac_pci_instance_show,
28784 .store = edac_pci_instance_store
28785 };
28786@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28787 return -EIO;
28788 }
28789
28790-static struct sysfs_ops edac_pci_sysfs_ops = {
28791+static const struct sysfs_ops edac_pci_sysfs_ops = {
28792 .show = edac_pci_dev_show,
28793 .store = edac_pci_dev_store
28794 };
28795@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28796 edac_printk(KERN_CRIT, EDAC_PCI,
28797 "Signaled System Error on %s\n",
28798 pci_name(dev));
28799- atomic_inc(&pci_nonparity_count);
28800+ atomic_inc_unchecked(&pci_nonparity_count);
28801 }
28802
28803 if (status & (PCI_STATUS_PARITY)) {
28804@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28805 "Master Data Parity Error on %s\n",
28806 pci_name(dev));
28807
28808- atomic_inc(&pci_parity_count);
28809+ atomic_inc_unchecked(&pci_parity_count);
28810 }
28811
28812 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28813@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28814 "Detected Parity Error on %s\n",
28815 pci_name(dev));
28816
28817- atomic_inc(&pci_parity_count);
28818+ atomic_inc_unchecked(&pci_parity_count);
28819 }
28820 }
28821
28822@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28823 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28824 "Signaled System Error on %s\n",
28825 pci_name(dev));
28826- atomic_inc(&pci_nonparity_count);
28827+ atomic_inc_unchecked(&pci_nonparity_count);
28828 }
28829
28830 if (status & (PCI_STATUS_PARITY)) {
28831@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28832 "Master Data Parity Error on "
28833 "%s\n", pci_name(dev));
28834
28835- atomic_inc(&pci_parity_count);
28836+ atomic_inc_unchecked(&pci_parity_count);
28837 }
28838
28839 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28840@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28841 "Detected Parity Error on %s\n",
28842 pci_name(dev));
28843
28844- atomic_inc(&pci_parity_count);
28845+ atomic_inc_unchecked(&pci_parity_count);
28846 }
28847 }
28848 }
28849@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28850 if (!check_pci_errors)
28851 return;
28852
28853- before_count = atomic_read(&pci_parity_count);
28854+ before_count = atomic_read_unchecked(&pci_parity_count);
28855
28856 /* scan all PCI devices looking for a Parity Error on devices and
28857 * bridges.
28858@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28859 /* Only if operator has selected panic on PCI Error */
28860 if (edac_pci_get_panic_on_pe()) {
28861 /* If the count is different 'after' from 'before' */
28862- if (before_count != atomic_read(&pci_parity_count))
28863+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28864 panic("EDAC: PCI Parity Error");
28865 }
28866 }
28867diff -urNp linux-2.6.32.45/drivers/firewire/core-card.c linux-2.6.32.45/drivers/firewire/core-card.c
28868--- linux-2.6.32.45/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
28869+++ linux-2.6.32.45/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
28870@@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
28871 mutex_unlock(&card_mutex);
28872
28873 /* Switch off most of the card driver interface. */
28874- dummy_driver.free_iso_context = card->driver->free_iso_context;
28875- dummy_driver.stop_iso = card->driver->stop_iso;
28876+ pax_open_kernel();
28877+ *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
28878+ *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
28879+ pax_close_kernel();
28880 card->driver = &dummy_driver;
28881
28882 fw_destroy_nodes(card);
28883diff -urNp linux-2.6.32.45/drivers/firewire/core-cdev.c linux-2.6.32.45/drivers/firewire/core-cdev.c
28884--- linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28885+++ linux-2.6.32.45/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28886@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28887 int ret;
28888
28889 if ((request->channels == 0 && request->bandwidth == 0) ||
28890- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28891- request->bandwidth < 0)
28892+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28893 return -EINVAL;
28894
28895 r = kmalloc(sizeof(*r), GFP_KERNEL);
28896diff -urNp linux-2.6.32.45/drivers/firewire/core-transaction.c linux-2.6.32.45/drivers/firewire/core-transaction.c
28897--- linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28898+++ linux-2.6.32.45/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28899@@ -36,6 +36,7 @@
28900 #include <linux/string.h>
28901 #include <linux/timer.h>
28902 #include <linux/types.h>
28903+#include <linux/sched.h>
28904
28905 #include <asm/byteorder.h>
28906
28907@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28908 struct transaction_callback_data d;
28909 struct fw_transaction t;
28910
28911+ pax_track_stack();
28912+
28913 init_completion(&d.done);
28914 d.payload = payload;
28915 fw_send_request(card, &t, tcode, destination_id, generation, speed,
28916diff -urNp linux-2.6.32.45/drivers/firmware/dmi_scan.c linux-2.6.32.45/drivers/firmware/dmi_scan.c
28917--- linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28918+++ linux-2.6.32.45/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28919@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28920 }
28921 }
28922 else {
28923- /*
28924- * no iounmap() for that ioremap(); it would be a no-op, but
28925- * it's so early in setup that sucker gets confused into doing
28926- * what it shouldn't if we actually call it.
28927- */
28928 p = dmi_ioremap(0xF0000, 0x10000);
28929 if (p == NULL)
28930 goto error;
28931diff -urNp linux-2.6.32.45/drivers/firmware/edd.c linux-2.6.32.45/drivers/firmware/edd.c
28932--- linux-2.6.32.45/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28933+++ linux-2.6.32.45/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28934@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28935 return ret;
28936 }
28937
28938-static struct sysfs_ops edd_attr_ops = {
28939+static const struct sysfs_ops edd_attr_ops = {
28940 .show = edd_attr_show,
28941 };
28942
28943diff -urNp linux-2.6.32.45/drivers/firmware/efivars.c linux-2.6.32.45/drivers/firmware/efivars.c
28944--- linux-2.6.32.45/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28945+++ linux-2.6.32.45/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28946@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28947 return ret;
28948 }
28949
28950-static struct sysfs_ops efivar_attr_ops = {
28951+static const struct sysfs_ops efivar_attr_ops = {
28952 .show = efivar_attr_show,
28953 .store = efivar_attr_store,
28954 };
28955diff -urNp linux-2.6.32.45/drivers/firmware/iscsi_ibft.c linux-2.6.32.45/drivers/firmware/iscsi_ibft.c
28956--- linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28957+++ linux-2.6.32.45/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28958@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28959 return ret;
28960 }
28961
28962-static struct sysfs_ops ibft_attr_ops = {
28963+static const struct sysfs_ops ibft_attr_ops = {
28964 .show = ibft_show_attribute,
28965 };
28966
28967diff -urNp linux-2.6.32.45/drivers/firmware/memmap.c linux-2.6.32.45/drivers/firmware/memmap.c
28968--- linux-2.6.32.45/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28969+++ linux-2.6.32.45/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28970@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28971 NULL
28972 };
28973
28974-static struct sysfs_ops memmap_attr_ops = {
28975+static const struct sysfs_ops memmap_attr_ops = {
28976 .show = memmap_attr_show,
28977 };
28978
28979diff -urNp linux-2.6.32.45/drivers/gpio/vr41xx_giu.c linux-2.6.32.45/drivers/gpio/vr41xx_giu.c
28980--- linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28981+++ linux-2.6.32.45/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28982@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28983 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28984 maskl, pendl, maskh, pendh);
28985
28986- atomic_inc(&irq_err_count);
28987+ atomic_inc_unchecked(&irq_err_count);
28988
28989 return -EINVAL;
28990 }
28991diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c
28992--- linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28993+++ linux-2.6.32.45/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28994@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28995 struct drm_crtc *tmp;
28996 int crtc_mask = 1;
28997
28998- WARN(!crtc, "checking null crtc?");
28999+ BUG_ON(!crtc);
29000
29001 dev = crtc->dev;
29002
29003@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29004
29005 adjusted_mode = drm_mode_duplicate(dev, mode);
29006
29007+ pax_track_stack();
29008+
29009 crtc->enabled = drm_helper_crtc_in_use(crtc);
29010
29011 if (!crtc->enabled)
29012diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_drv.c linux-2.6.32.45/drivers/gpu/drm/drm_drv.c
29013--- linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29014+++ linux-2.6.32.45/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29015@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29016 char *kdata = NULL;
29017
29018 atomic_inc(&dev->ioctl_count);
29019- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29020+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29021 ++file_priv->ioctl_count;
29022
29023 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29024diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_fops.c linux-2.6.32.45/drivers/gpu/drm/drm_fops.c
29025--- linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29026+++ linux-2.6.32.45/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29027@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29028 }
29029
29030 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29031- atomic_set(&dev->counts[i], 0);
29032+ atomic_set_unchecked(&dev->counts[i], 0);
29033
29034 dev->sigdata.lock = NULL;
29035
29036@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29037
29038 retcode = drm_open_helper(inode, filp, dev);
29039 if (!retcode) {
29040- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29041+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29042 spin_lock(&dev->count_lock);
29043- if (!dev->open_count++) {
29044+ if (local_inc_return(&dev->open_count) == 1) {
29045 spin_unlock(&dev->count_lock);
29046 retcode = drm_setup(dev);
29047 goto out;
29048@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29049
29050 lock_kernel();
29051
29052- DRM_DEBUG("open_count = %d\n", dev->open_count);
29053+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29054
29055 if (dev->driver->preclose)
29056 dev->driver->preclose(dev, file_priv);
29057@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29058 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29059 task_pid_nr(current),
29060 (long)old_encode_dev(file_priv->minor->device),
29061- dev->open_count);
29062+ local_read(&dev->open_count));
29063
29064 /* if the master has gone away we can't do anything with the lock */
29065 if (file_priv->minor->master)
29066@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29067 * End inline drm_release
29068 */
29069
29070- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29071+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29072 spin_lock(&dev->count_lock);
29073- if (!--dev->open_count) {
29074+ if (local_dec_and_test(&dev->open_count)) {
29075 if (atomic_read(&dev->ioctl_count)) {
29076 DRM_ERROR("Device busy: %d\n",
29077 atomic_read(&dev->ioctl_count));
29078diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_gem.c linux-2.6.32.45/drivers/gpu/drm/drm_gem.c
29079--- linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29080+++ linux-2.6.32.45/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29081@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29082 spin_lock_init(&dev->object_name_lock);
29083 idr_init(&dev->object_name_idr);
29084 atomic_set(&dev->object_count, 0);
29085- atomic_set(&dev->object_memory, 0);
29086+ atomic_set_unchecked(&dev->object_memory, 0);
29087 atomic_set(&dev->pin_count, 0);
29088- atomic_set(&dev->pin_memory, 0);
29089+ atomic_set_unchecked(&dev->pin_memory, 0);
29090 atomic_set(&dev->gtt_count, 0);
29091- atomic_set(&dev->gtt_memory, 0);
29092+ atomic_set_unchecked(&dev->gtt_memory, 0);
29093
29094 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29095 if (!mm) {
29096@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29097 goto fput;
29098 }
29099 atomic_inc(&dev->object_count);
29100- atomic_add(obj->size, &dev->object_memory);
29101+ atomic_add_unchecked(obj->size, &dev->object_memory);
29102 return obj;
29103 fput:
29104 fput(obj->filp);
29105@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29106
29107 fput(obj->filp);
29108 atomic_dec(&dev->object_count);
29109- atomic_sub(obj->size, &dev->object_memory);
29110+ atomic_sub_unchecked(obj->size, &dev->object_memory);
29111 kfree(obj);
29112 }
29113 EXPORT_SYMBOL(drm_gem_object_free);
29114diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_info.c linux-2.6.32.45/drivers/gpu/drm/drm_info.c
29115--- linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29116+++ linux-2.6.32.45/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29117@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29118 struct drm_local_map *map;
29119 struct drm_map_list *r_list;
29120
29121- /* Hardcoded from _DRM_FRAME_BUFFER,
29122- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29123- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29124- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29125+ static const char * const types[] = {
29126+ [_DRM_FRAME_BUFFER] = "FB",
29127+ [_DRM_REGISTERS] = "REG",
29128+ [_DRM_SHM] = "SHM",
29129+ [_DRM_AGP] = "AGP",
29130+ [_DRM_SCATTER_GATHER] = "SG",
29131+ [_DRM_CONSISTENT] = "PCI",
29132+ [_DRM_GEM] = "GEM" };
29133 const char *type;
29134 int i;
29135
29136@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29137 map = r_list->map;
29138 if (!map)
29139 continue;
29140- if (map->type < 0 || map->type > 5)
29141+ if (map->type >= ARRAY_SIZE(types))
29142 type = "??";
29143 else
29144 type = types[map->type];
29145@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29146 struct drm_device *dev = node->minor->dev;
29147
29148 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29149- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29150+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29151 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29152- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29153- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29154+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29155+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29156 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29157 return 0;
29158 }
29159@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29160 mutex_lock(&dev->struct_mutex);
29161 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29162 atomic_read(&dev->vma_count),
29163+#ifdef CONFIG_GRKERNSEC_HIDESYM
29164+ NULL, 0);
29165+#else
29166 high_memory, (u64)virt_to_phys(high_memory));
29167+#endif
29168
29169 list_for_each_entry(pt, &dev->vmalist, head) {
29170 vma = pt->vma;
29171@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29172 continue;
29173 seq_printf(m,
29174 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29175- pt->pid, vma->vm_start, vma->vm_end,
29176+ pt->pid,
29177+#ifdef CONFIG_GRKERNSEC_HIDESYM
29178+ 0, 0,
29179+#else
29180+ vma->vm_start, vma->vm_end,
29181+#endif
29182 vma->vm_flags & VM_READ ? 'r' : '-',
29183 vma->vm_flags & VM_WRITE ? 'w' : '-',
29184 vma->vm_flags & VM_EXEC ? 'x' : '-',
29185 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29186 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29187 vma->vm_flags & VM_IO ? 'i' : '-',
29188+#ifdef CONFIG_GRKERNSEC_HIDESYM
29189+ 0);
29190+#else
29191 vma->vm_pgoff);
29192+#endif
29193
29194 #if defined(__i386__)
29195 pgprot = pgprot_val(vma->vm_page_prot);
29196diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c
29197--- linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29198+++ linux-2.6.32.45/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29199@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29200 stats->data[i].value =
29201 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29202 else
29203- stats->data[i].value = atomic_read(&dev->counts[i]);
29204+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29205 stats->data[i].type = dev->types[i];
29206 }
29207
29208diff -urNp linux-2.6.32.45/drivers/gpu/drm/drm_lock.c linux-2.6.32.45/drivers/gpu/drm/drm_lock.c
29209--- linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29210+++ linux-2.6.32.45/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29211@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29212 if (drm_lock_take(&master->lock, lock->context)) {
29213 master->lock.file_priv = file_priv;
29214 master->lock.lock_time = jiffies;
29215- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29216+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29217 break; /* Got lock */
29218 }
29219
29220@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29221 return -EINVAL;
29222 }
29223
29224- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29225+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29226
29227 /* kernel_context_switch isn't used by any of the x86 drm
29228 * modules but is required by the Sparc driver.
29229diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c
29230--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29231+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29232@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29233 dma->buflist[vertex->idx],
29234 vertex->discard, vertex->used);
29235
29236- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29237- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29238+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29239+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29240 sarea_priv->last_enqueue = dev_priv->counter - 1;
29241 sarea_priv->last_dispatch = (int)hw_status[5];
29242
29243@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29244 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29245 mc->last_render);
29246
29247- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29248- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29249+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29250+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29251 sarea_priv->last_enqueue = dev_priv->counter - 1;
29252 sarea_priv->last_dispatch = (int)hw_status[5];
29253
29254diff -urNp linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h
29255--- linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29256+++ linux-2.6.32.45/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29257@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29258 int page_flipping;
29259
29260 wait_queue_head_t irq_queue;
29261- atomic_t irq_received;
29262- atomic_t irq_emitted;
29263+ atomic_unchecked_t irq_received;
29264+ atomic_unchecked_t irq_emitted;
29265
29266 int front_offset;
29267 } drm_i810_private_t;
29268diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h
29269--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29270+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29271@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29272 int page_flipping;
29273
29274 wait_queue_head_t irq_queue;
29275- atomic_t irq_received;
29276- atomic_t irq_emitted;
29277+ atomic_unchecked_t irq_received;
29278+ atomic_unchecked_t irq_emitted;
29279
29280 int use_mi_batchbuffer_start;
29281
29282diff -urNp linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c
29283--- linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29284+++ linux-2.6.32.45/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29285@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29286
29287 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29288
29289- atomic_inc(&dev_priv->irq_received);
29290+ atomic_inc_unchecked(&dev_priv->irq_received);
29291 wake_up_interruptible(&dev_priv->irq_queue);
29292
29293 return IRQ_HANDLED;
29294@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29295
29296 DRM_DEBUG("%s\n", __func__);
29297
29298- atomic_inc(&dev_priv->irq_emitted);
29299+ atomic_inc_unchecked(&dev_priv->irq_emitted);
29300
29301 BEGIN_LP_RING(2);
29302 OUT_RING(0);
29303 OUT_RING(GFX_OP_USER_INTERRUPT);
29304 ADVANCE_LP_RING();
29305
29306- return atomic_read(&dev_priv->irq_emitted);
29307+ return atomic_read_unchecked(&dev_priv->irq_emitted);
29308 }
29309
29310 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29311@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29312
29313 DRM_DEBUG("%s\n", __func__);
29314
29315- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29316+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29317 return 0;
29318
29319 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29320@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29321
29322 for (;;) {
29323 __set_current_state(TASK_INTERRUPTIBLE);
29324- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29325+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29326 break;
29327 if ((signed)(end - jiffies) <= 0) {
29328 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29329@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29330 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29331 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29332 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29333- atomic_set(&dev_priv->irq_received, 0);
29334- atomic_set(&dev_priv->irq_emitted, 0);
29335+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29336+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29337 init_waitqueue_head(&dev_priv->irq_queue);
29338 }
29339
29340diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c
29341--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29342+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29343@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29344 }
29345 }
29346
29347-struct intel_dvo_dev_ops ch7017_ops = {
29348+const struct intel_dvo_dev_ops ch7017_ops = {
29349 .init = ch7017_init,
29350 .detect = ch7017_detect,
29351 .mode_valid = ch7017_mode_valid,
29352diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c
29353--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29354+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29355@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29356 }
29357 }
29358
29359-struct intel_dvo_dev_ops ch7xxx_ops = {
29360+const struct intel_dvo_dev_ops ch7xxx_ops = {
29361 .init = ch7xxx_init,
29362 .detect = ch7xxx_detect,
29363 .mode_valid = ch7xxx_mode_valid,
29364diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h
29365--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29366+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29367@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29368 *
29369 * \return singly-linked list of modes or NULL if no modes found.
29370 */
29371- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29372+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29373
29374 /**
29375 * Clean up driver-specific bits of the output
29376 */
29377- void (*destroy) (struct intel_dvo_device *dvo);
29378+ void (* const destroy) (struct intel_dvo_device *dvo);
29379
29380 /**
29381 * Debugging hook to dump device registers to log file
29382 */
29383- void (*dump_regs)(struct intel_dvo_device *dvo);
29384+ void (* const dump_regs)(struct intel_dvo_device *dvo);
29385 };
29386
29387-extern struct intel_dvo_dev_ops sil164_ops;
29388-extern struct intel_dvo_dev_ops ch7xxx_ops;
29389-extern struct intel_dvo_dev_ops ivch_ops;
29390-extern struct intel_dvo_dev_ops tfp410_ops;
29391-extern struct intel_dvo_dev_ops ch7017_ops;
29392+extern const struct intel_dvo_dev_ops sil164_ops;
29393+extern const struct intel_dvo_dev_ops ch7xxx_ops;
29394+extern const struct intel_dvo_dev_ops ivch_ops;
29395+extern const struct intel_dvo_dev_ops tfp410_ops;
29396+extern const struct intel_dvo_dev_ops ch7017_ops;
29397
29398 #endif /* _INTEL_DVO_H */
29399diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c
29400--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29401+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29402@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29403 }
29404 }
29405
29406-struct intel_dvo_dev_ops ivch_ops= {
29407+const struct intel_dvo_dev_ops ivch_ops= {
29408 .init = ivch_init,
29409 .dpms = ivch_dpms,
29410 .save = ivch_save,
29411diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c
29412--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29413+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29414@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29415 }
29416 }
29417
29418-struct intel_dvo_dev_ops sil164_ops = {
29419+const struct intel_dvo_dev_ops sil164_ops = {
29420 .init = sil164_init,
29421 .detect = sil164_detect,
29422 .mode_valid = sil164_mode_valid,
29423diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c
29424--- linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29425+++ linux-2.6.32.45/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29426@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29427 }
29428 }
29429
29430-struct intel_dvo_dev_ops tfp410_ops = {
29431+const struct intel_dvo_dev_ops tfp410_ops = {
29432 .init = tfp410_init,
29433 .detect = tfp410_detect,
29434 .mode_valid = tfp410_mode_valid,
29435diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c
29436--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29437+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29438@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29439 I915_READ(GTIMR));
29440 }
29441 seq_printf(m, "Interrupts received: %d\n",
29442- atomic_read(&dev_priv->irq_received));
29443+ atomic_read_unchecked(&dev_priv->irq_received));
29444 if (dev_priv->hw_status_page != NULL) {
29445 seq_printf(m, "Current sequence: %d\n",
29446 i915_get_gem_seqno(dev));
29447diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c
29448--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29449+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29450@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29451 return i915_resume(dev);
29452 }
29453
29454-static struct vm_operations_struct i915_gem_vm_ops = {
29455+static const struct vm_operations_struct i915_gem_vm_ops = {
29456 .fault = i915_gem_fault,
29457 .open = drm_gem_vm_open,
29458 .close = drm_gem_vm_close,
29459diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h
29460--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29461+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29462@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29463 /* display clock increase/decrease */
29464 /* pll clock increase/decrease */
29465 /* clock gating init */
29466-};
29467+} __no_const;
29468
29469 typedef struct drm_i915_private {
29470 struct drm_device *dev;
29471@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29472 int page_flipping;
29473
29474 wait_queue_head_t irq_queue;
29475- atomic_t irq_received;
29476+ atomic_unchecked_t irq_received;
29477 /** Protects user_irq_refcount and irq_mask_reg */
29478 spinlock_t user_irq_lock;
29479 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29480diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c
29481--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29482+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29483@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29484
29485 args->aper_size = dev->gtt_total;
29486 args->aper_available_size = (args->aper_size -
29487- atomic_read(&dev->pin_memory));
29488+ atomic_read_unchecked(&dev->pin_memory));
29489
29490 return 0;
29491 }
29492@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29493 return -EINVAL;
29494 }
29495
29496+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29497+ drm_gem_object_unreference(obj);
29498+ return -EFAULT;
29499+ }
29500+
29501 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29502 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29503 } else {
29504@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29505 return -EINVAL;
29506 }
29507
29508+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29509+ drm_gem_object_unreference(obj);
29510+ return -EFAULT;
29511+ }
29512+
29513 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29514 * it would end up going through the fenced access, and we'll get
29515 * different detiling behavior between reading and writing.
29516@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29517
29518 if (obj_priv->gtt_space) {
29519 atomic_dec(&dev->gtt_count);
29520- atomic_sub(obj->size, &dev->gtt_memory);
29521+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29522
29523 drm_mm_put_block(obj_priv->gtt_space);
29524 obj_priv->gtt_space = NULL;
29525@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29526 goto search_free;
29527 }
29528 atomic_inc(&dev->gtt_count);
29529- atomic_add(obj->size, &dev->gtt_memory);
29530+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
29531
29532 /* Assert that the object is not currently in any GPU domain. As it
29533 * wasn't in the GTT, there shouldn't be any way it could have been in
29534@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29535 "%d/%d gtt bytes\n",
29536 atomic_read(&dev->object_count),
29537 atomic_read(&dev->pin_count),
29538- atomic_read(&dev->object_memory),
29539- atomic_read(&dev->pin_memory),
29540- atomic_read(&dev->gtt_memory),
29541+ atomic_read_unchecked(&dev->object_memory),
29542+ atomic_read_unchecked(&dev->pin_memory),
29543+ atomic_read_unchecked(&dev->gtt_memory),
29544 dev->gtt_total);
29545 }
29546 goto err;
29547@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29548 */
29549 if (obj_priv->pin_count == 1) {
29550 atomic_inc(&dev->pin_count);
29551- atomic_add(obj->size, &dev->pin_memory);
29552+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29553 if (!obj_priv->active &&
29554 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29555 !list_empty(&obj_priv->list))
29556@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29557 list_move_tail(&obj_priv->list,
29558 &dev_priv->mm.inactive_list);
29559 atomic_dec(&dev->pin_count);
29560- atomic_sub(obj->size, &dev->pin_memory);
29561+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29562 }
29563 i915_verify_inactive(dev, __FILE__, __LINE__);
29564 }
29565diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c
29566--- linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29567+++ linux-2.6.32.45/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29568@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29569 int irq_received;
29570 int ret = IRQ_NONE;
29571
29572- atomic_inc(&dev_priv->irq_received);
29573+ atomic_inc_unchecked(&dev_priv->irq_received);
29574
29575 if (IS_IGDNG(dev))
29576 return igdng_irq_handler(dev);
29577@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29578 {
29579 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29580
29581- atomic_set(&dev_priv->irq_received, 0);
29582+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29583
29584 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29585 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29586diff -urNp linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c
29587--- linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29588+++ linux-2.6.32.45/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29589@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29590 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29591
29592 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29593- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29594+ pax_open_kernel();
29595+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29596+ pax_close_kernel();
29597
29598 /* Read the regs to test if we can talk to the device */
29599 for (i = 0; i < 0x40; i++) {
29600diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h
29601--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29602+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29603@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29604 u32 clear_cmd;
29605 u32 maccess;
29606
29607- atomic_t vbl_received; /**< Number of vblanks received. */
29608+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29609 wait_queue_head_t fence_queue;
29610- atomic_t last_fence_retired;
29611+ atomic_unchecked_t last_fence_retired;
29612 u32 next_fence_to_post;
29613
29614 unsigned int fb_cpp;
29615diff -urNp linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c
29616--- linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29617+++ linux-2.6.32.45/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29618@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29619 if (crtc != 0)
29620 return 0;
29621
29622- return atomic_read(&dev_priv->vbl_received);
29623+ return atomic_read_unchecked(&dev_priv->vbl_received);
29624 }
29625
29626
29627@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29628 /* VBLANK interrupt */
29629 if (status & MGA_VLINEPEN) {
29630 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29631- atomic_inc(&dev_priv->vbl_received);
29632+ atomic_inc_unchecked(&dev_priv->vbl_received);
29633 drm_handle_vblank(dev, 0);
29634 handled = 1;
29635 }
29636@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29637 MGA_WRITE(MGA_PRIMEND, prim_end);
29638 }
29639
29640- atomic_inc(&dev_priv->last_fence_retired);
29641+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29642 DRM_WAKEUP(&dev_priv->fence_queue);
29643 handled = 1;
29644 }
29645@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29646 * using fences.
29647 */
29648 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29649- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29650+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29651 - *sequence) <= (1 << 23)));
29652
29653 *sequence = cur_fence;
29654diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c
29655--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29656+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29657@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29658
29659 /* GH: Simple idle check.
29660 */
29661- atomic_set(&dev_priv->idle_count, 0);
29662+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29663
29664 /* We don't support anything other than bus-mastering ring mode,
29665 * but the ring can be in either AGP or PCI space for the ring
29666diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h
29667--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29668+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29669@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29670 int is_pci;
29671 unsigned long cce_buffers_offset;
29672
29673- atomic_t idle_count;
29674+ atomic_unchecked_t idle_count;
29675
29676 int page_flipping;
29677 int current_page;
29678 u32 crtc_offset;
29679 u32 crtc_offset_cntl;
29680
29681- atomic_t vbl_received;
29682+ atomic_unchecked_t vbl_received;
29683
29684 u32 color_fmt;
29685 unsigned int front_offset;
29686diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c
29687--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29688+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29689@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29690 if (crtc != 0)
29691 return 0;
29692
29693- return atomic_read(&dev_priv->vbl_received);
29694+ return atomic_read_unchecked(&dev_priv->vbl_received);
29695 }
29696
29697 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29698@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29699 /* VBLANK interrupt */
29700 if (status & R128_CRTC_VBLANK_INT) {
29701 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29702- atomic_inc(&dev_priv->vbl_received);
29703+ atomic_inc_unchecked(&dev_priv->vbl_received);
29704 drm_handle_vblank(dev, 0);
29705 return IRQ_HANDLED;
29706 }
29707diff -urNp linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c
29708--- linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29709+++ linux-2.6.32.45/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29710@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29711
29712 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29713 {
29714- if (atomic_read(&dev_priv->idle_count) == 0) {
29715+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29716 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29717 } else {
29718- atomic_set(&dev_priv->idle_count, 0);
29719+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29720 }
29721 }
29722
29723diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c
29724--- linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29725+++ linux-2.6.32.45/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29726@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29727 char name[512];
29728 int i;
29729
29730+ pax_track_stack();
29731+
29732 ctx->card = card;
29733 ctx->bios = bios;
29734
29735diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c
29736--- linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29737+++ linux-2.6.32.45/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29738@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29739 regex_t mask_rex;
29740 regmatch_t match[4];
29741 char buf[1024];
29742- size_t end;
29743+ long end;
29744 int len;
29745 int done = 0;
29746 int r;
29747 unsigned o;
29748 struct offset *offset;
29749 char last_reg_s[10];
29750- int last_reg;
29751+ unsigned long last_reg;
29752
29753 if (regcomp
29754 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29755diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c
29756--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29757+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29758@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29759 bool linkb;
29760 struct radeon_i2c_bus_rec ddc_bus;
29761
29762+ pax_track_stack();
29763+
29764 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29765
29766 if (data_offset == 0)
29767@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29768 }
29769 }
29770
29771-struct bios_connector {
29772+static struct bios_connector {
29773 bool valid;
29774 uint16_t line_mux;
29775 uint16_t devices;
29776 int connector_type;
29777 struct radeon_i2c_bus_rec ddc_bus;
29778-};
29779+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29780
29781 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29782 drm_device
29783@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29784 uint8_t dac;
29785 union atom_supported_devices *supported_devices;
29786 int i, j;
29787- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29788
29789 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29790
29791diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c
29792--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29793+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29794@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29795
29796 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29797 error = freq - current_freq;
29798- error = error < 0 ? 0xffffffff : error;
29799+ error = (int32_t)error < 0 ? 0xffffffff : error;
29800 } else
29801 error = abs(current_freq - freq);
29802 vco_diff = abs(vco - best_vco);
29803diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h
29804--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29805+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29806@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29807
29808 /* SW interrupt */
29809 wait_queue_head_t swi_queue;
29810- atomic_t swi_emitted;
29811+ atomic_unchecked_t swi_emitted;
29812 int vblank_crtc;
29813 uint32_t irq_enable_reg;
29814 uint32_t r500_disp_irq_reg;
29815diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c
29816--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29817+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29818@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29819 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29820 return 0;
29821 }
29822- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29823+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29824 if (!rdev->cp.ready) {
29825 /* FIXME: cp is not running assume everythings is done right
29826 * away
29827@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29828 return r;
29829 }
29830 WREG32(rdev->fence_drv.scratch_reg, 0);
29831- atomic_set(&rdev->fence_drv.seq, 0);
29832+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29833 INIT_LIST_HEAD(&rdev->fence_drv.created);
29834 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29835 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29836diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h
29837--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29838+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
29839@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29840 */
29841 struct radeon_fence_driver {
29842 uint32_t scratch_reg;
29843- atomic_t seq;
29844+ atomic_unchecked_t seq;
29845 uint32_t last_seq;
29846 unsigned long count_timeout;
29847 wait_queue_head_t queue;
29848@@ -640,7 +640,7 @@ struct radeon_asic {
29849 uint32_t offset, uint32_t obj_size);
29850 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
29851 void (*bandwidth_update)(struct radeon_device *rdev);
29852-};
29853+} __no_const;
29854
29855 /*
29856 * Asic structures
29857diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c
29858--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29859+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29860@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29861 request = compat_alloc_user_space(sizeof(*request));
29862 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29863 || __put_user(req32.param, &request->param)
29864- || __put_user((void __user *)(unsigned long)req32.value,
29865+ || __put_user((unsigned long)req32.value,
29866 &request->value))
29867 return -EFAULT;
29868
29869diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c
29870--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29871+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29872@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29873 unsigned int ret;
29874 RING_LOCALS;
29875
29876- atomic_inc(&dev_priv->swi_emitted);
29877- ret = atomic_read(&dev_priv->swi_emitted);
29878+ atomic_inc_unchecked(&dev_priv->swi_emitted);
29879+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29880
29881 BEGIN_RING(4);
29882 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29883@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29884 drm_radeon_private_t *dev_priv =
29885 (drm_radeon_private_t *) dev->dev_private;
29886
29887- atomic_set(&dev_priv->swi_emitted, 0);
29888+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29889 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29890
29891 dev->max_vblank_count = 0x001fffff;
29892diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c
29893--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29894+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29895@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29896 {
29897 drm_radeon_private_t *dev_priv = dev->dev_private;
29898 drm_radeon_getparam_t *param = data;
29899- int value;
29900+ int value = 0;
29901
29902 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29903
29904diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c
29905--- linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29906+++ linux-2.6.32.45/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29907@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29908 DRM_INFO("radeon: ttm finalized\n");
29909 }
29910
29911-static struct vm_operations_struct radeon_ttm_vm_ops;
29912-static const struct vm_operations_struct *ttm_vm_ops = NULL;
29913-
29914-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29915-{
29916- struct ttm_buffer_object *bo;
29917- int r;
29918-
29919- bo = (struct ttm_buffer_object *)vma->vm_private_data;
29920- if (bo == NULL) {
29921- return VM_FAULT_NOPAGE;
29922- }
29923- r = ttm_vm_ops->fault(vma, vmf);
29924- return r;
29925-}
29926-
29927 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29928 {
29929 struct drm_file *file_priv;
29930 struct radeon_device *rdev;
29931- int r;
29932
29933 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29934 return drm_mmap(filp, vma);
29935@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29936
29937 file_priv = (struct drm_file *)filp->private_data;
29938 rdev = file_priv->minor->dev->dev_private;
29939- if (rdev == NULL) {
29940+ if (!rdev)
29941 return -EINVAL;
29942- }
29943- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29944- if (unlikely(r != 0)) {
29945- return r;
29946- }
29947- if (unlikely(ttm_vm_ops == NULL)) {
29948- ttm_vm_ops = vma->vm_ops;
29949- radeon_ttm_vm_ops = *ttm_vm_ops;
29950- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29951- }
29952- vma->vm_ops = &radeon_ttm_vm_ops;
29953- return 0;
29954+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29955 }
29956
29957
29958diff -urNp linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c
29959--- linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29960+++ linux-2.6.32.45/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29961@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29962 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29963 rdev->pm.sideport_bandwidth.full)
29964 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29965- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29966+ read_delay_latency.full = rfixed_const(800 * 1000);
29967 read_delay_latency.full = rfixed_div(read_delay_latency,
29968 rdev->pm.igp_sideport_mclk);
29969+ a.full = rfixed_const(370);
29970+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29971 } else {
29972 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29973 rdev->pm.k8_bandwidth.full)
29974diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c
29975--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29976+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29977@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29978 NULL
29979 };
29980
29981-static struct sysfs_ops ttm_bo_global_ops = {
29982+static const struct sysfs_ops ttm_bo_global_ops = {
29983 .show = &ttm_bo_global_show
29984 };
29985
29986diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c
29987--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29988+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29989@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29990 {
29991 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29992 vma->vm_private_data;
29993- struct ttm_bo_device *bdev = bo->bdev;
29994+ struct ttm_bo_device *bdev;
29995 unsigned long bus_base;
29996 unsigned long bus_offset;
29997 unsigned long bus_size;
29998@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29999 unsigned long address = (unsigned long)vmf->virtual_address;
30000 int retval = VM_FAULT_NOPAGE;
30001
30002+ if (!bo)
30003+ return VM_FAULT_NOPAGE;
30004+ bdev = bo->bdev;
30005+
30006 /*
30007 * Work around locking order reversal in fault / nopfn
30008 * between mmap_sem and bo_reserve: Perform a trylock operation
30009diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c
30010--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30011+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30012@@ -36,7 +36,7 @@
30013 struct ttm_global_item {
30014 struct mutex mutex;
30015 void *object;
30016- int refcount;
30017+ atomic_t refcount;
30018 };
30019
30020 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30021@@ -49,7 +49,7 @@ void ttm_global_init(void)
30022 struct ttm_global_item *item = &glob[i];
30023 mutex_init(&item->mutex);
30024 item->object = NULL;
30025- item->refcount = 0;
30026+ atomic_set(&item->refcount, 0);
30027 }
30028 }
30029
30030@@ -59,7 +59,7 @@ void ttm_global_release(void)
30031 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30032 struct ttm_global_item *item = &glob[i];
30033 BUG_ON(item->object != NULL);
30034- BUG_ON(item->refcount != 0);
30035+ BUG_ON(atomic_read(&item->refcount) != 0);
30036 }
30037 }
30038
30039@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30040 void *object;
30041
30042 mutex_lock(&item->mutex);
30043- if (item->refcount == 0) {
30044+ if (atomic_read(&item->refcount) == 0) {
30045 item->object = kzalloc(ref->size, GFP_KERNEL);
30046 if (unlikely(item->object == NULL)) {
30047 ret = -ENOMEM;
30048@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30049 goto out_err;
30050
30051 }
30052- ++item->refcount;
30053+ atomic_inc(&item->refcount);
30054 ref->object = item->object;
30055 object = item->object;
30056 mutex_unlock(&item->mutex);
30057@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30058 struct ttm_global_item *item = &glob[ref->global_type];
30059
30060 mutex_lock(&item->mutex);
30061- BUG_ON(item->refcount == 0);
30062+ BUG_ON(atomic_read(&item->refcount) == 0);
30063 BUG_ON(ref->object != item->object);
30064- if (--item->refcount == 0) {
30065+ if (atomic_dec_and_test(&item->refcount)) {
30066 ref->release(ref);
30067 item->object = NULL;
30068 }
30069diff -urNp linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c
30070--- linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30071+++ linux-2.6.32.45/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30072@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30073 NULL
30074 };
30075
30076-static struct sysfs_ops ttm_mem_zone_ops = {
30077+static const struct sysfs_ops ttm_mem_zone_ops = {
30078 .show = &ttm_mem_zone_show,
30079 .store = &ttm_mem_zone_store
30080 };
30081diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h
30082--- linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30083+++ linux-2.6.32.45/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30084@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30085 typedef uint32_t maskarray_t[5];
30086
30087 typedef struct drm_via_irq {
30088- atomic_t irq_received;
30089+ atomic_unchecked_t irq_received;
30090 uint32_t pending_mask;
30091 uint32_t enable_mask;
30092 wait_queue_head_t irq_queue;
30093@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30094 struct timeval last_vblank;
30095 int last_vblank_valid;
30096 unsigned usec_per_vblank;
30097- atomic_t vbl_received;
30098+ atomic_unchecked_t vbl_received;
30099 drm_via_state_t hc_state;
30100 char pci_buf[VIA_PCI_BUF_SIZE];
30101 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30102diff -urNp linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c
30103--- linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30104+++ linux-2.6.32.45/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30105@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30106 if (crtc != 0)
30107 return 0;
30108
30109- return atomic_read(&dev_priv->vbl_received);
30110+ return atomic_read_unchecked(&dev_priv->vbl_received);
30111 }
30112
30113 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30114@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30115
30116 status = VIA_READ(VIA_REG_INTERRUPT);
30117 if (status & VIA_IRQ_VBLANK_PENDING) {
30118- atomic_inc(&dev_priv->vbl_received);
30119- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30120+ atomic_inc_unchecked(&dev_priv->vbl_received);
30121+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30122 do_gettimeofday(&cur_vblank);
30123 if (dev_priv->last_vblank_valid) {
30124 dev_priv->usec_per_vblank =
30125@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30126 dev_priv->last_vblank = cur_vblank;
30127 dev_priv->last_vblank_valid = 1;
30128 }
30129- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30130+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30131 DRM_DEBUG("US per vblank is: %u\n",
30132 dev_priv->usec_per_vblank);
30133 }
30134@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30135
30136 for (i = 0; i < dev_priv->num_irqs; ++i) {
30137 if (status & cur_irq->pending_mask) {
30138- atomic_inc(&cur_irq->irq_received);
30139+ atomic_inc_unchecked(&cur_irq->irq_received);
30140 DRM_WAKEUP(&cur_irq->irq_queue);
30141 handled = 1;
30142 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30143@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30144 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30145 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30146 masks[irq][4]));
30147- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30148+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30149 } else {
30150 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30151 (((cur_irq_sequence =
30152- atomic_read(&cur_irq->irq_received)) -
30153+ atomic_read_unchecked(&cur_irq->irq_received)) -
30154 *sequence) <= (1 << 23)));
30155 }
30156 *sequence = cur_irq_sequence;
30157@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30158 }
30159
30160 for (i = 0; i < dev_priv->num_irqs; ++i) {
30161- atomic_set(&cur_irq->irq_received, 0);
30162+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30163 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30164 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30165 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30166@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30167 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30168 case VIA_IRQ_RELATIVE:
30169 irqwait->request.sequence +=
30170- atomic_read(&cur_irq->irq_received);
30171+ atomic_read_unchecked(&cur_irq->irq_received);
30172 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30173 case VIA_IRQ_ABSOLUTE:
30174 break;
30175diff -urNp linux-2.6.32.45/drivers/hid/hid-core.c linux-2.6.32.45/drivers/hid/hid-core.c
30176--- linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30177+++ linux-2.6.32.45/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30178@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30179
30180 int hid_add_device(struct hid_device *hdev)
30181 {
30182- static atomic_t id = ATOMIC_INIT(0);
30183+ static atomic_unchecked_t id = ATOMIC_INIT(0);
30184 int ret;
30185
30186 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30187@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30188 /* XXX hack, any other cleaner solution after the driver core
30189 * is converted to allow more than 20 bytes as the device name? */
30190 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30191- hdev->vendor, hdev->product, atomic_inc_return(&id));
30192+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30193
30194 ret = device_add(&hdev->dev);
30195 if (!ret)
30196diff -urNp linux-2.6.32.45/drivers/hid/usbhid/hiddev.c linux-2.6.32.45/drivers/hid/usbhid/hiddev.c
30197--- linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30198+++ linux-2.6.32.45/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30199@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30200 return put_user(HID_VERSION, (int __user *)arg);
30201
30202 case HIDIOCAPPLICATION:
30203- if (arg < 0 || arg >= hid->maxapplication)
30204+ if (arg >= hid->maxapplication)
30205 return -EINVAL;
30206
30207 for (i = 0; i < hid->maxcollection; i++)
30208diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.c linux-2.6.32.45/drivers/hwmon/lis3lv02d.c
30209--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30210+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30211@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30212 * the lid is closed. This leads to interrupts as soon as a little move
30213 * is done.
30214 */
30215- atomic_inc(&lis3_dev.count);
30216+ atomic_inc_unchecked(&lis3_dev.count);
30217
30218 wake_up_interruptible(&lis3_dev.misc_wait);
30219 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30220@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30221 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30222 return -EBUSY; /* already open */
30223
30224- atomic_set(&lis3_dev.count, 0);
30225+ atomic_set_unchecked(&lis3_dev.count, 0);
30226
30227 /*
30228 * The sensor can generate interrupts for free-fall and direction
30229@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30230 add_wait_queue(&lis3_dev.misc_wait, &wait);
30231 while (true) {
30232 set_current_state(TASK_INTERRUPTIBLE);
30233- data = atomic_xchg(&lis3_dev.count, 0);
30234+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30235 if (data)
30236 break;
30237
30238@@ -244,7 +244,7 @@ out:
30239 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30240 {
30241 poll_wait(file, &lis3_dev.misc_wait, wait);
30242- if (atomic_read(&lis3_dev.count))
30243+ if (atomic_read_unchecked(&lis3_dev.count))
30244 return POLLIN | POLLRDNORM;
30245 return 0;
30246 }
30247diff -urNp linux-2.6.32.45/drivers/hwmon/lis3lv02d.h linux-2.6.32.45/drivers/hwmon/lis3lv02d.h
30248--- linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30249+++ linux-2.6.32.45/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30250@@ -201,7 +201,7 @@ struct lis3lv02d {
30251
30252 struct input_polled_dev *idev; /* input device */
30253 struct platform_device *pdev; /* platform device */
30254- atomic_t count; /* interrupt count after last read */
30255+ atomic_unchecked_t count; /* interrupt count after last read */
30256 int xcalib; /* calibrated null value for x */
30257 int ycalib; /* calibrated null value for y */
30258 int zcalib; /* calibrated null value for z */
30259diff -urNp linux-2.6.32.45/drivers/hwmon/sht15.c linux-2.6.32.45/drivers/hwmon/sht15.c
30260--- linux-2.6.32.45/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30261+++ linux-2.6.32.45/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30262@@ -112,7 +112,7 @@ struct sht15_data {
30263 int supply_uV;
30264 int supply_uV_valid;
30265 struct work_struct update_supply_work;
30266- atomic_t interrupt_handled;
30267+ atomic_unchecked_t interrupt_handled;
30268 };
30269
30270 /**
30271@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30272 return ret;
30273
30274 gpio_direction_input(data->pdata->gpio_data);
30275- atomic_set(&data->interrupt_handled, 0);
30276+ atomic_set_unchecked(&data->interrupt_handled, 0);
30277
30278 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30279 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30280 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30281 /* Only relevant if the interrupt hasn't occured. */
30282- if (!atomic_read(&data->interrupt_handled))
30283+ if (!atomic_read_unchecked(&data->interrupt_handled))
30284 schedule_work(&data->read_work);
30285 }
30286 ret = wait_event_timeout(data->wait_queue,
30287@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30288 struct sht15_data *data = d;
30289 /* First disable the interrupt */
30290 disable_irq_nosync(irq);
30291- atomic_inc(&data->interrupt_handled);
30292+ atomic_inc_unchecked(&data->interrupt_handled);
30293 /* Then schedule a reading work struct */
30294 if (data->flag != SHT15_READING_NOTHING)
30295 schedule_work(&data->read_work);
30296@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30297 here as could have gone low in meantime so verify
30298 it hasn't!
30299 */
30300- atomic_set(&data->interrupt_handled, 0);
30301+ atomic_set_unchecked(&data->interrupt_handled, 0);
30302 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30303 /* If still not occured or another handler has been scheduled */
30304 if (gpio_get_value(data->pdata->gpio_data)
30305- || atomic_read(&data->interrupt_handled))
30306+ || atomic_read_unchecked(&data->interrupt_handled))
30307 return;
30308 }
30309 /* Read the data back from the device */
30310diff -urNp linux-2.6.32.45/drivers/hwmon/w83791d.c linux-2.6.32.45/drivers/hwmon/w83791d.c
30311--- linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30312+++ linux-2.6.32.45/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30313@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30314 struct i2c_board_info *info);
30315 static int w83791d_remove(struct i2c_client *client);
30316
30317-static int w83791d_read(struct i2c_client *client, u8 register);
30318-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30319+static int w83791d_read(struct i2c_client *client, u8 reg);
30320+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30321 static struct w83791d_data *w83791d_update_device(struct device *dev);
30322
30323 #ifdef DEBUG
30324diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c
30325--- linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30326+++ linux-2.6.32.45/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30327@@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30328 }
30329
30330 /* Fill in the new structures */
30331- s4882_algo[0] = *(amd756_smbus.algo);
30332- s4882_algo[0].smbus_xfer = amd756_access_virt0;
30333+ memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30334+ *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30335 s4882_adapter[0] = amd756_smbus;
30336 s4882_adapter[0].algo = s4882_algo;
30337- s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30338+ *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30339 for (i = 1; i < 5; i++) {
30340- s4882_algo[i] = *(amd756_smbus.algo);
30341+ memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30342 s4882_adapter[i] = amd756_smbus;
30343 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30344 "SMBus 8111 adapter (CPU%d)", i-1);
30345 s4882_adapter[i].algo = s4882_algo+i;
30346 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30347 }
30348- s4882_algo[1].smbus_xfer = amd756_access_virt1;
30349- s4882_algo[2].smbus_xfer = amd756_access_virt2;
30350- s4882_algo[3].smbus_xfer = amd756_access_virt3;
30351- s4882_algo[4].smbus_xfer = amd756_access_virt4;
30352+ *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30353+ *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30354+ *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30355+ *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30356
30357 /* Register virtual adapters */
30358 for (i = 0; i < 5; i++) {
30359diff -urNp linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c
30360--- linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30361+++ linux-2.6.32.45/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30362@@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30363 }
30364
30365 /* Fill in the new structures */
30366- s4985_algo[0] = *(nforce2_smbus->algo);
30367- s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30368+ memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30369+ *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30370 s4985_adapter[0] = *nforce2_smbus;
30371 s4985_adapter[0].algo = s4985_algo;
30372 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30373 for (i = 1; i < 5; i++) {
30374- s4985_algo[i] = *(nforce2_smbus->algo);
30375+ memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30376 s4985_adapter[i] = *nforce2_smbus;
30377 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30378 "SMBus nForce2 adapter (CPU%d)", i - 1);
30379 s4985_adapter[i].algo = s4985_algo + i;
30380 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30381 }
30382- s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30383- s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30384- s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30385- s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30386+ *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30387+ *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30388+ *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30389+ *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30390
30391 /* Register virtual adapters */
30392 for (i = 0; i < 5; i++) {
30393diff -urNp linux-2.6.32.45/drivers/ide/ide-cd.c linux-2.6.32.45/drivers/ide/ide-cd.c
30394--- linux-2.6.32.45/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30395+++ linux-2.6.32.45/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30396@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30397 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30398 if ((unsigned long)buf & alignment
30399 || blk_rq_bytes(rq) & q->dma_pad_mask
30400- || object_is_on_stack(buf))
30401+ || object_starts_on_stack(buf))
30402 drive->dma = 0;
30403 }
30404 }
30405diff -urNp linux-2.6.32.45/drivers/ide/ide-floppy.c linux-2.6.32.45/drivers/ide/ide-floppy.c
30406--- linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30407+++ linux-2.6.32.45/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30408@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30409 u8 pc_buf[256], header_len, desc_cnt;
30410 int i, rc = 1, blocks, length;
30411
30412+ pax_track_stack();
30413+
30414 ide_debug_log(IDE_DBG_FUNC, "enter");
30415
30416 drive->bios_cyl = 0;
30417diff -urNp linux-2.6.32.45/drivers/ide/setup-pci.c linux-2.6.32.45/drivers/ide/setup-pci.c
30418--- linux-2.6.32.45/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30419+++ linux-2.6.32.45/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30420@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30421 int ret, i, n_ports = dev2 ? 4 : 2;
30422 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30423
30424+ pax_track_stack();
30425+
30426 for (i = 0; i < n_ports / 2; i++) {
30427 ret = ide_setup_pci_controller(pdev[i], d, !i);
30428 if (ret < 0)
30429diff -urNp linux-2.6.32.45/drivers/ieee1394/dv1394.c linux-2.6.32.45/drivers/ieee1394/dv1394.c
30430--- linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30431+++ linux-2.6.32.45/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30432@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30433 based upon DIF section and sequence
30434 */
30435
30436-static void inline
30437+static inline void
30438 frame_put_packet (struct frame *f, struct packet *p)
30439 {
30440 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30441diff -urNp linux-2.6.32.45/drivers/ieee1394/hosts.c linux-2.6.32.45/drivers/ieee1394/hosts.c
30442--- linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30443+++ linux-2.6.32.45/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30444@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30445 }
30446
30447 static struct hpsb_host_driver dummy_driver = {
30448+ .name = "dummy",
30449 .transmit_packet = dummy_transmit_packet,
30450 .devctl = dummy_devctl,
30451 .isoctl = dummy_isoctl
30452diff -urNp linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c
30453--- linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30454+++ linux-2.6.32.45/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30455@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30456 for (func = 0; func < 8; func++) {
30457 u32 class = read_pci_config(num,slot,func,
30458 PCI_CLASS_REVISION);
30459- if ((class == 0xffffffff))
30460+ if (class == 0xffffffff)
30461 continue; /* No device at this func */
30462
30463 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30464diff -urNp linux-2.6.32.45/drivers/ieee1394/ohci1394.c linux-2.6.32.45/drivers/ieee1394/ohci1394.c
30465--- linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30466+++ linux-2.6.32.45/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30467@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30468 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30469
30470 /* Module Parameters */
30471-static int phys_dma = 1;
30472+static int phys_dma;
30473 module_param(phys_dma, int, 0444);
30474-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30475+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30476
30477 static void dma_trm_tasklet(unsigned long data);
30478 static void dma_trm_reset(struct dma_trm_ctx *d);
30479diff -urNp linux-2.6.32.45/drivers/ieee1394/sbp2.c linux-2.6.32.45/drivers/ieee1394/sbp2.c
30480--- linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30481+++ linux-2.6.32.45/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30482@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30483 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30484 MODULE_LICENSE("GPL");
30485
30486-static int sbp2_module_init(void)
30487+static int __init sbp2_module_init(void)
30488 {
30489 int ret;
30490
30491diff -urNp linux-2.6.32.45/drivers/infiniband/core/cm.c linux-2.6.32.45/drivers/infiniband/core/cm.c
30492--- linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30493+++ linux-2.6.32.45/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30494@@ -112,7 +112,7 @@ static char const counter_group_names[CM
30495
30496 struct cm_counter_group {
30497 struct kobject obj;
30498- atomic_long_t counter[CM_ATTR_COUNT];
30499+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30500 };
30501
30502 struct cm_counter_attribute {
30503@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30504 struct ib_mad_send_buf *msg = NULL;
30505 int ret;
30506
30507- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30508+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30509 counter[CM_REQ_COUNTER]);
30510
30511 /* Quick state check to discard duplicate REQs. */
30512@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30513 if (!cm_id_priv)
30514 return;
30515
30516- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30517+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30518 counter[CM_REP_COUNTER]);
30519 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30520 if (ret)
30521@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30522 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30523 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30524 spin_unlock_irq(&cm_id_priv->lock);
30525- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30526+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30527 counter[CM_RTU_COUNTER]);
30528 goto out;
30529 }
30530@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30531 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30532 dreq_msg->local_comm_id);
30533 if (!cm_id_priv) {
30534- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30535+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30536 counter[CM_DREQ_COUNTER]);
30537 cm_issue_drep(work->port, work->mad_recv_wc);
30538 return -EINVAL;
30539@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30540 case IB_CM_MRA_REP_RCVD:
30541 break;
30542 case IB_CM_TIMEWAIT:
30543- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30544+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30545 counter[CM_DREQ_COUNTER]);
30546 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30547 goto unlock;
30548@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30549 cm_free_msg(msg);
30550 goto deref;
30551 case IB_CM_DREQ_RCVD:
30552- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30553+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30554 counter[CM_DREQ_COUNTER]);
30555 goto unlock;
30556 default:
30557@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30558 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30559 cm_id_priv->msg, timeout)) {
30560 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30561- atomic_long_inc(&work->port->
30562+ atomic_long_inc_unchecked(&work->port->
30563 counter_group[CM_RECV_DUPLICATES].
30564 counter[CM_MRA_COUNTER]);
30565 goto out;
30566@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30567 break;
30568 case IB_CM_MRA_REQ_RCVD:
30569 case IB_CM_MRA_REP_RCVD:
30570- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30571+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30572 counter[CM_MRA_COUNTER]);
30573 /* fall through */
30574 default:
30575@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30576 case IB_CM_LAP_IDLE:
30577 break;
30578 case IB_CM_MRA_LAP_SENT:
30579- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30580+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30581 counter[CM_LAP_COUNTER]);
30582 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30583 goto unlock;
30584@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30585 cm_free_msg(msg);
30586 goto deref;
30587 case IB_CM_LAP_RCVD:
30588- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30589+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30590 counter[CM_LAP_COUNTER]);
30591 goto unlock;
30592 default:
30593@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30594 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30595 if (cur_cm_id_priv) {
30596 spin_unlock_irq(&cm.lock);
30597- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30598+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30599 counter[CM_SIDR_REQ_COUNTER]);
30600 goto out; /* Duplicate message. */
30601 }
30602@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30603 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30604 msg->retries = 1;
30605
30606- atomic_long_add(1 + msg->retries,
30607+ atomic_long_add_unchecked(1 + msg->retries,
30608 &port->counter_group[CM_XMIT].counter[attr_index]);
30609 if (msg->retries)
30610- atomic_long_add(msg->retries,
30611+ atomic_long_add_unchecked(msg->retries,
30612 &port->counter_group[CM_XMIT_RETRIES].
30613 counter[attr_index]);
30614
30615@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30616 }
30617
30618 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30619- atomic_long_inc(&port->counter_group[CM_RECV].
30620+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30621 counter[attr_id - CM_ATTR_ID_OFFSET]);
30622
30623 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30624@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30625 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30626
30627 return sprintf(buf, "%ld\n",
30628- atomic_long_read(&group->counter[cm_attr->index]));
30629+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30630 }
30631
30632-static struct sysfs_ops cm_counter_ops = {
30633+static const struct sysfs_ops cm_counter_ops = {
30634 .show = cm_show_counter
30635 };
30636
30637diff -urNp linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c
30638--- linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30639+++ linux-2.6.32.45/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30640@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30641
30642 struct task_struct *thread;
30643
30644- atomic_t req_ser;
30645- atomic_t flush_ser;
30646+ atomic_unchecked_t req_ser;
30647+ atomic_unchecked_t flush_ser;
30648
30649 wait_queue_head_t force_wait;
30650 };
30651@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30652 struct ib_fmr_pool *pool = pool_ptr;
30653
30654 do {
30655- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30656+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30657 ib_fmr_batch_release(pool);
30658
30659- atomic_inc(&pool->flush_ser);
30660+ atomic_inc_unchecked(&pool->flush_ser);
30661 wake_up_interruptible(&pool->force_wait);
30662
30663 if (pool->flush_function)
30664@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30665 }
30666
30667 set_current_state(TASK_INTERRUPTIBLE);
30668- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30669+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30670 !kthread_should_stop())
30671 schedule();
30672 __set_current_state(TASK_RUNNING);
30673@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30674 pool->dirty_watermark = params->dirty_watermark;
30675 pool->dirty_len = 0;
30676 spin_lock_init(&pool->pool_lock);
30677- atomic_set(&pool->req_ser, 0);
30678- atomic_set(&pool->flush_ser, 0);
30679+ atomic_set_unchecked(&pool->req_ser, 0);
30680+ atomic_set_unchecked(&pool->flush_ser, 0);
30681 init_waitqueue_head(&pool->force_wait);
30682
30683 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30684@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30685 }
30686 spin_unlock_irq(&pool->pool_lock);
30687
30688- serial = atomic_inc_return(&pool->req_ser);
30689+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30690 wake_up_process(pool->thread);
30691
30692 if (wait_event_interruptible(pool->force_wait,
30693- atomic_read(&pool->flush_ser) - serial >= 0))
30694+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30695 return -EINTR;
30696
30697 return 0;
30698@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30699 } else {
30700 list_add_tail(&fmr->list, &pool->dirty_list);
30701 if (++pool->dirty_len >= pool->dirty_watermark) {
30702- atomic_inc(&pool->req_ser);
30703+ atomic_inc_unchecked(&pool->req_ser);
30704 wake_up_process(pool->thread);
30705 }
30706 }
30707diff -urNp linux-2.6.32.45/drivers/infiniband/core/sysfs.c linux-2.6.32.45/drivers/infiniband/core/sysfs.c
30708--- linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30709+++ linux-2.6.32.45/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30710@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30711 return port_attr->show(p, port_attr, buf);
30712 }
30713
30714-static struct sysfs_ops port_sysfs_ops = {
30715+static const struct sysfs_ops port_sysfs_ops = {
30716 .show = port_attr_show
30717 };
30718
30719diff -urNp linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c
30720--- linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30721+++ linux-2.6.32.45/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30722@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30723 dst->grh.sgid_index = src->grh.sgid_index;
30724 dst->grh.hop_limit = src->grh.hop_limit;
30725 dst->grh.traffic_class = src->grh.traffic_class;
30726+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30727 dst->dlid = src->dlid;
30728 dst->sl = src->sl;
30729 dst->src_path_bits = src->src_path_bits;
30730 dst->static_rate = src->static_rate;
30731 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30732 dst->port_num = src->port_num;
30733+ dst->reserved = 0;
30734 }
30735 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30736
30737 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30738 struct ib_qp_attr *src)
30739 {
30740+ dst->qp_state = src->qp_state;
30741 dst->cur_qp_state = src->cur_qp_state;
30742 dst->path_mtu = src->path_mtu;
30743 dst->path_mig_state = src->path_mig_state;
30744@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30745 dst->rnr_retry = src->rnr_retry;
30746 dst->alt_port_num = src->alt_port_num;
30747 dst->alt_timeout = src->alt_timeout;
30748+ memset(dst->reserved, 0, sizeof(dst->reserved));
30749 }
30750 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30751
30752diff -urNp linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c
30753--- linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30754+++ linux-2.6.32.45/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30755@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30756 struct infinipath_counters counters;
30757 struct ipath_devdata *dd;
30758
30759+ pax_track_stack();
30760+
30761 dd = file->f_path.dentry->d_inode->i_private;
30762 dd->ipath_f_read_counters(dd, &counters);
30763
30764diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c
30765--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30766+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30767@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30768 LIST_HEAD(nes_adapter_list);
30769 static LIST_HEAD(nes_dev_list);
30770
30771-atomic_t qps_destroyed;
30772+atomic_unchecked_t qps_destroyed;
30773
30774 static unsigned int ee_flsh_adapter;
30775 static unsigned int sysfs_nonidx_addr;
30776@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30777 struct nes_adapter *nesadapter = nesdev->nesadapter;
30778 u32 qp_id;
30779
30780- atomic_inc(&qps_destroyed);
30781+ atomic_inc_unchecked(&qps_destroyed);
30782
30783 /* Free the control structures */
30784
30785diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c
30786--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30787+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30788@@ -69,11 +69,11 @@ u32 cm_packets_received;
30789 u32 cm_listens_created;
30790 u32 cm_listens_destroyed;
30791 u32 cm_backlog_drops;
30792-atomic_t cm_loopbacks;
30793-atomic_t cm_nodes_created;
30794-atomic_t cm_nodes_destroyed;
30795-atomic_t cm_accel_dropped_pkts;
30796-atomic_t cm_resets_recvd;
30797+atomic_unchecked_t cm_loopbacks;
30798+atomic_unchecked_t cm_nodes_created;
30799+atomic_unchecked_t cm_nodes_destroyed;
30800+atomic_unchecked_t cm_accel_dropped_pkts;
30801+atomic_unchecked_t cm_resets_recvd;
30802
30803 static inline int mini_cm_accelerated(struct nes_cm_core *,
30804 struct nes_cm_node *);
30805@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30806
30807 static struct nes_cm_core *g_cm_core;
30808
30809-atomic_t cm_connects;
30810-atomic_t cm_accepts;
30811-atomic_t cm_disconnects;
30812-atomic_t cm_closes;
30813-atomic_t cm_connecteds;
30814-atomic_t cm_connect_reqs;
30815-atomic_t cm_rejects;
30816+atomic_unchecked_t cm_connects;
30817+atomic_unchecked_t cm_accepts;
30818+atomic_unchecked_t cm_disconnects;
30819+atomic_unchecked_t cm_closes;
30820+atomic_unchecked_t cm_connecteds;
30821+atomic_unchecked_t cm_connect_reqs;
30822+atomic_unchecked_t cm_rejects;
30823
30824
30825 /**
30826@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30827 cm_node->rem_mac);
30828
30829 add_hte_node(cm_core, cm_node);
30830- atomic_inc(&cm_nodes_created);
30831+ atomic_inc_unchecked(&cm_nodes_created);
30832
30833 return cm_node;
30834 }
30835@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30836 }
30837
30838 atomic_dec(&cm_core->node_cnt);
30839- atomic_inc(&cm_nodes_destroyed);
30840+ atomic_inc_unchecked(&cm_nodes_destroyed);
30841 nesqp = cm_node->nesqp;
30842 if (nesqp) {
30843 nesqp->cm_node = NULL;
30844@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30845
30846 static void drop_packet(struct sk_buff *skb)
30847 {
30848- atomic_inc(&cm_accel_dropped_pkts);
30849+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30850 dev_kfree_skb_any(skb);
30851 }
30852
30853@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30854
30855 int reset = 0; /* whether to send reset in case of err.. */
30856 int passive_state;
30857- atomic_inc(&cm_resets_recvd);
30858+ atomic_inc_unchecked(&cm_resets_recvd);
30859 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30860 " refcnt=%d\n", cm_node, cm_node->state,
30861 atomic_read(&cm_node->ref_count));
30862@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30863 rem_ref_cm_node(cm_node->cm_core, cm_node);
30864 return NULL;
30865 }
30866- atomic_inc(&cm_loopbacks);
30867+ atomic_inc_unchecked(&cm_loopbacks);
30868 loopbackremotenode->loopbackpartner = cm_node;
30869 loopbackremotenode->tcp_cntxt.rcv_wscale =
30870 NES_CM_DEFAULT_RCV_WND_SCALE;
30871@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30872 add_ref_cm_node(cm_node);
30873 } else if (cm_node->state == NES_CM_STATE_TSA) {
30874 rem_ref_cm_node(cm_core, cm_node);
30875- atomic_inc(&cm_accel_dropped_pkts);
30876+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30877 dev_kfree_skb_any(skb);
30878 break;
30879 }
30880@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30881
30882 if ((cm_id) && (cm_id->event_handler)) {
30883 if (issue_disconn) {
30884- atomic_inc(&cm_disconnects);
30885+ atomic_inc_unchecked(&cm_disconnects);
30886 cm_event.event = IW_CM_EVENT_DISCONNECT;
30887 cm_event.status = disconn_status;
30888 cm_event.local_addr = cm_id->local_addr;
30889@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30890 }
30891
30892 if (issue_close) {
30893- atomic_inc(&cm_closes);
30894+ atomic_inc_unchecked(&cm_closes);
30895 nes_disconnect(nesqp, 1);
30896
30897 cm_id->provider_data = nesqp;
30898@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30899
30900 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30901 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30902- atomic_inc(&cm_accepts);
30903+ atomic_inc_unchecked(&cm_accepts);
30904
30905 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30906 atomic_read(&nesvnic->netdev->refcnt));
30907@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30908
30909 struct nes_cm_core *cm_core;
30910
30911- atomic_inc(&cm_rejects);
30912+ atomic_inc_unchecked(&cm_rejects);
30913 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30914 loopback = cm_node->loopbackpartner;
30915 cm_core = cm_node->cm_core;
30916@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30917 ntohl(cm_id->local_addr.sin_addr.s_addr),
30918 ntohs(cm_id->local_addr.sin_port));
30919
30920- atomic_inc(&cm_connects);
30921+ atomic_inc_unchecked(&cm_connects);
30922 nesqp->active_conn = 1;
30923
30924 /* cache the cm_id in the qp */
30925@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30926 if (nesqp->destroyed) {
30927 return;
30928 }
30929- atomic_inc(&cm_connecteds);
30930+ atomic_inc_unchecked(&cm_connecteds);
30931 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30932 " local port 0x%04X. jiffies = %lu.\n",
30933 nesqp->hwqp.qp_id,
30934@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30935
30936 ret = cm_id->event_handler(cm_id, &cm_event);
30937 cm_id->add_ref(cm_id);
30938- atomic_inc(&cm_closes);
30939+ atomic_inc_unchecked(&cm_closes);
30940 cm_event.event = IW_CM_EVENT_CLOSE;
30941 cm_event.status = IW_CM_EVENT_STATUS_OK;
30942 cm_event.provider_data = cm_id->provider_data;
30943@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30944 return;
30945 cm_id = cm_node->cm_id;
30946
30947- atomic_inc(&cm_connect_reqs);
30948+ atomic_inc_unchecked(&cm_connect_reqs);
30949 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30950 cm_node, cm_id, jiffies);
30951
30952@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30953 return;
30954 cm_id = cm_node->cm_id;
30955
30956- atomic_inc(&cm_connect_reqs);
30957+ atomic_inc_unchecked(&cm_connect_reqs);
30958 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30959 cm_node, cm_id, jiffies);
30960
30961diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h
30962--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30963+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30964@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30965 extern unsigned int wqm_quanta;
30966 extern struct list_head nes_adapter_list;
30967
30968-extern atomic_t cm_connects;
30969-extern atomic_t cm_accepts;
30970-extern atomic_t cm_disconnects;
30971-extern atomic_t cm_closes;
30972-extern atomic_t cm_connecteds;
30973-extern atomic_t cm_connect_reqs;
30974-extern atomic_t cm_rejects;
30975-extern atomic_t mod_qp_timouts;
30976-extern atomic_t qps_created;
30977-extern atomic_t qps_destroyed;
30978-extern atomic_t sw_qps_destroyed;
30979+extern atomic_unchecked_t cm_connects;
30980+extern atomic_unchecked_t cm_accepts;
30981+extern atomic_unchecked_t cm_disconnects;
30982+extern atomic_unchecked_t cm_closes;
30983+extern atomic_unchecked_t cm_connecteds;
30984+extern atomic_unchecked_t cm_connect_reqs;
30985+extern atomic_unchecked_t cm_rejects;
30986+extern atomic_unchecked_t mod_qp_timouts;
30987+extern atomic_unchecked_t qps_created;
30988+extern atomic_unchecked_t qps_destroyed;
30989+extern atomic_unchecked_t sw_qps_destroyed;
30990 extern u32 mh_detected;
30991 extern u32 mh_pauses_sent;
30992 extern u32 cm_packets_sent;
30993@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30994 extern u32 cm_listens_created;
30995 extern u32 cm_listens_destroyed;
30996 extern u32 cm_backlog_drops;
30997-extern atomic_t cm_loopbacks;
30998-extern atomic_t cm_nodes_created;
30999-extern atomic_t cm_nodes_destroyed;
31000-extern atomic_t cm_accel_dropped_pkts;
31001-extern atomic_t cm_resets_recvd;
31002+extern atomic_unchecked_t cm_loopbacks;
31003+extern atomic_unchecked_t cm_nodes_created;
31004+extern atomic_unchecked_t cm_nodes_destroyed;
31005+extern atomic_unchecked_t cm_accel_dropped_pkts;
31006+extern atomic_unchecked_t cm_resets_recvd;
31007
31008 extern u32 int_mod_timer_init;
31009 extern u32 int_mod_cq_depth_256;
31010diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c
31011--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31012+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31013@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31014 target_stat_values[++index] = mh_detected;
31015 target_stat_values[++index] = mh_pauses_sent;
31016 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31017- target_stat_values[++index] = atomic_read(&cm_connects);
31018- target_stat_values[++index] = atomic_read(&cm_accepts);
31019- target_stat_values[++index] = atomic_read(&cm_disconnects);
31020- target_stat_values[++index] = atomic_read(&cm_connecteds);
31021- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31022- target_stat_values[++index] = atomic_read(&cm_rejects);
31023- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31024- target_stat_values[++index] = atomic_read(&qps_created);
31025- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31026- target_stat_values[++index] = atomic_read(&qps_destroyed);
31027- target_stat_values[++index] = atomic_read(&cm_closes);
31028+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31029+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31030+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31031+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31032+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31033+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31034+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31035+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31036+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31037+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31038+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31039 target_stat_values[++index] = cm_packets_sent;
31040 target_stat_values[++index] = cm_packets_bounced;
31041 target_stat_values[++index] = cm_packets_created;
31042@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31043 target_stat_values[++index] = cm_listens_created;
31044 target_stat_values[++index] = cm_listens_destroyed;
31045 target_stat_values[++index] = cm_backlog_drops;
31046- target_stat_values[++index] = atomic_read(&cm_loopbacks);
31047- target_stat_values[++index] = atomic_read(&cm_nodes_created);
31048- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31049- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31050- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31051+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31052+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31053+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31054+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31055+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31056 target_stat_values[++index] = int_mod_timer_init;
31057 target_stat_values[++index] = int_mod_cq_depth_1;
31058 target_stat_values[++index] = int_mod_cq_depth_4;
31059diff -urNp linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c
31060--- linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31061+++ linux-2.6.32.45/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31062@@ -45,9 +45,9 @@
31063
31064 #include <rdma/ib_umem.h>
31065
31066-atomic_t mod_qp_timouts;
31067-atomic_t qps_created;
31068-atomic_t sw_qps_destroyed;
31069+atomic_unchecked_t mod_qp_timouts;
31070+atomic_unchecked_t qps_created;
31071+atomic_unchecked_t sw_qps_destroyed;
31072
31073 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31074
31075@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31076 if (init_attr->create_flags)
31077 return ERR_PTR(-EINVAL);
31078
31079- atomic_inc(&qps_created);
31080+ atomic_inc_unchecked(&qps_created);
31081 switch (init_attr->qp_type) {
31082 case IB_QPT_RC:
31083 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31084@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31085 struct iw_cm_event cm_event;
31086 int ret;
31087
31088- atomic_inc(&sw_qps_destroyed);
31089+ atomic_inc_unchecked(&sw_qps_destroyed);
31090 nesqp->destroyed = 1;
31091
31092 /* Blow away the connection if it exists. */
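
The nes hunks above, and many of the driver hunks that follow, convert pure statistics counters from atomic_t to atomic_unchecked_t. Under the patch's PAX_REFCOUNT hardening, plain atomic_t operations gain overflow detection; counters whose only job is bookkeeping, where wrapping is harmless and nothing frees an object based on the value, are moved to the _unchecked variants so they keep ordinary modular arithmetic. The sketch below is illustrative only and is not the patch's actual definition, which lives in the per-architecture atomic headers:

    /* Illustrative sketch: an "unchecked" counter whose operations carry no
     * overflow trap, mirroring the names used in the hunks above. */
    typedef struct {
            int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            __sync_fetch_and_add(&v->counter, 1);   /* plain wrap-around add */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return *(volatile const int *)&v->counter;
    }
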
31093diff -urNp linux-2.6.32.45/drivers/input/gameport/gameport.c linux-2.6.32.45/drivers/input/gameport/gameport.c
31094--- linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31095+++ linux-2.6.32.45/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31096@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31097 */
31098 static void gameport_init_port(struct gameport *gameport)
31099 {
31100- static atomic_t gameport_no = ATOMIC_INIT(0);
31101+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31102
31103 __module_get(THIS_MODULE);
31104
31105 mutex_init(&gameport->drv_mutex);
31106 device_initialize(&gameport->dev);
31107- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31108+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31109 gameport->dev.bus = &gameport_bus;
31110 gameport->dev.release = gameport_release_port;
31111 if (gameport->parent)
31112diff -urNp linux-2.6.32.45/drivers/input/input.c linux-2.6.32.45/drivers/input/input.c
31113--- linux-2.6.32.45/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31114+++ linux-2.6.32.45/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31115@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31116 */
31117 int input_register_device(struct input_dev *dev)
31118 {
31119- static atomic_t input_no = ATOMIC_INIT(0);
31120+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31121 struct input_handler *handler;
31122 const char *path;
31123 int error;
31124@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31125 dev->setkeycode = input_default_setkeycode;
31126
31127 dev_set_name(&dev->dev, "input%ld",
31128- (unsigned long) atomic_inc_return(&input_no) - 1);
31129+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31130
31131 error = device_add(&dev->dev);
31132 if (error)
31133diff -urNp linux-2.6.32.45/drivers/input/joystick/sidewinder.c linux-2.6.32.45/drivers/input/joystick/sidewinder.c
31134--- linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31135+++ linux-2.6.32.45/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31136@@ -30,6 +30,7 @@
31137 #include <linux/kernel.h>
31138 #include <linux/module.h>
31139 #include <linux/slab.h>
31140+#include <linux/sched.h>
31141 #include <linux/init.h>
31142 #include <linux/input.h>
31143 #include <linux/gameport.h>
31144@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31145 unsigned char buf[SW_LENGTH];
31146 int i;
31147
31148+ pax_track_stack();
31149+
31150 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31151
31152 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31153diff -urNp linux-2.6.32.45/drivers/input/joystick/xpad.c linux-2.6.32.45/drivers/input/joystick/xpad.c
31154--- linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31155+++ linux-2.6.32.45/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31156@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31157
31158 static int xpad_led_probe(struct usb_xpad *xpad)
31159 {
31160- static atomic_t led_seq = ATOMIC_INIT(0);
31161+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31162 long led_no;
31163 struct xpad_led *led;
31164 struct led_classdev *led_cdev;
31165@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31166 if (!led)
31167 return -ENOMEM;
31168
31169- led_no = (long)atomic_inc_return(&led_seq) - 1;
31170+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31171
31172 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31173 led->xpad = xpad;
31174diff -urNp linux-2.6.32.45/drivers/input/serio/serio.c linux-2.6.32.45/drivers/input/serio/serio.c
31175--- linux-2.6.32.45/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31176+++ linux-2.6.32.45/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31177@@ -527,7 +527,7 @@ static void serio_release_port(struct de
31178 */
31179 static void serio_init_port(struct serio *serio)
31180 {
31181- static atomic_t serio_no = ATOMIC_INIT(0);
31182+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31183
31184 __module_get(THIS_MODULE);
31185
31186@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31187 mutex_init(&serio->drv_mutex);
31188 device_initialize(&serio->dev);
31189 dev_set_name(&serio->dev, "serio%ld",
31190- (long)atomic_inc_return(&serio_no) - 1);
31191+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
31192 serio->dev.bus = &serio_bus;
31193 serio->dev.release = serio_release_port;
31194 if (serio->parent) {
31195diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/common.c linux-2.6.32.45/drivers/isdn/gigaset/common.c
31196--- linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31197+++ linux-2.6.32.45/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31198@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31199 cs->commands_pending = 0;
31200 cs->cur_at_seq = 0;
31201 cs->gotfwver = -1;
31202- cs->open_count = 0;
31203+ local_set(&cs->open_count, 0);
31204 cs->dev = NULL;
31205 cs->tty = NULL;
31206 cs->tty_dev = NULL;
31207diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h
31208--- linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31209+++ linux-2.6.32.45/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31210@@ -34,6 +34,7 @@
31211 #include <linux/tty_driver.h>
31212 #include <linux/list.h>
31213 #include <asm/atomic.h>
31214+#include <asm/local.h>
31215
31216 #define GIG_VERSION {0,5,0,0}
31217 #define GIG_COMPAT {0,4,0,0}
31218@@ -446,7 +447,7 @@ struct cardstate {
31219 spinlock_t cmdlock;
31220 unsigned curlen, cmdbytes;
31221
31222- unsigned open_count;
31223+ local_t open_count;
31224 struct tty_struct *tty;
31225 struct tasklet_struct if_wake_tasklet;
31226 unsigned control_state;
31227diff -urNp linux-2.6.32.45/drivers/isdn/gigaset/interface.c linux-2.6.32.45/drivers/isdn/gigaset/interface.c
31228--- linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31229+++ linux-2.6.32.45/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31230@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31231 return -ERESTARTSYS; // FIXME -EINTR?
31232 tty->driver_data = cs;
31233
31234- ++cs->open_count;
31235-
31236- if (cs->open_count == 1) {
31237+ if (local_inc_return(&cs->open_count) == 1) {
31238 spin_lock_irqsave(&cs->lock, flags);
31239 cs->tty = tty;
31240 spin_unlock_irqrestore(&cs->lock, flags);
31241@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31242
31243 if (!cs->connected)
31244 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31245- else if (!cs->open_count)
31246+ else if (!local_read(&cs->open_count))
31247 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31248 else {
31249- if (!--cs->open_count) {
31250+ if (!local_dec_return(&cs->open_count)) {
31251 spin_lock_irqsave(&cs->lock, flags);
31252 cs->tty = NULL;
31253 spin_unlock_irqrestore(&cs->lock, flags);
31254@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31255 if (!cs->connected) {
31256 gig_dbg(DEBUG_IF, "not connected");
31257 retval = -ENODEV;
31258- } else if (!cs->open_count)
31259+ } else if (!local_read(&cs->open_count))
31260 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31261 else {
31262 retval = 0;
31263@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31264 if (!cs->connected) {
31265 gig_dbg(DEBUG_IF, "not connected");
31266 retval = -ENODEV;
31267- } else if (!cs->open_count)
31268+ } else if (!local_read(&cs->open_count))
31269 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31270 else if (cs->mstate != MS_LOCKED) {
31271 dev_warn(cs->dev, "can't write to unlocked device\n");
31272@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31273 if (!cs->connected) {
31274 gig_dbg(DEBUG_IF, "not connected");
31275 retval = -ENODEV;
31276- } else if (!cs->open_count)
31277+ } else if (!local_read(&cs->open_count))
31278 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31279 else if (cs->mstate != MS_LOCKED) {
31280 dev_warn(cs->dev, "can't write to unlocked device\n");
31281@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31282
31283 if (!cs->connected)
31284 gig_dbg(DEBUG_IF, "not connected");
31285- else if (!cs->open_count)
31286+ else if (!local_read(&cs->open_count))
31287 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31288 else if (cs->mstate != MS_LOCKED)
31289 dev_warn(cs->dev, "can't write to unlocked device\n");
31290@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31291
31292 if (!cs->connected)
31293 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31294- else if (!cs->open_count)
31295+ else if (!local_read(&cs->open_count))
31296 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31297 else {
31298 //FIXME
31299@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31300
31301 if (!cs->connected)
31302 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31303- else if (!cs->open_count)
31304+ else if (!local_read(&cs->open_count))
31305 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31306 else {
31307 //FIXME
31308@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31309 goto out;
31310 }
31311
31312- if (!cs->open_count) {
31313+ if (!local_read(&cs->open_count)) {
31314 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31315 goto out;
31316 }
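
The gigaset hunks above replace a plain unsigned open_count, previously manipulated with bare ++/-- in the tty callbacks, with a local_t driven by local_inc_return()/local_dec_return()/local_read(), so the first-open and last-close transitions are decided by atomic read-modify-write results. A minimal sketch of the pattern, with a hypothetical state struct standing in for struct cardstate:

    #include <asm/local.h>

    struct my_state {                /* hypothetical stand-in for struct cardstate */
            local_t open_count;
    };

    static int my_open(struct my_state *s)
    {
            if (local_inc_return(&s->open_count) == 1) {
                    /* first opener: publish the tty, set up the device */
            }
            return 0;
    }

    static void my_close(struct my_state *s)
    {
            if (!local_dec_return(&s->open_count)) {
                    /* last closer: unpublish the tty, tear everything down */
            }
    }
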
31317diff -urNp linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c
31318--- linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31319+++ linux-2.6.32.45/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31320@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31321 }
31322 if (left) {
31323 if (t4file->user) {
31324- if (copy_from_user(buf, dp, left))
31325+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31326 return -EFAULT;
31327 } else {
31328 memcpy(buf, dp, left);
31329@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31330 }
31331 if (left) {
31332 if (config->user) {
31333- if (copy_from_user(buf, dp, left))
31334+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31335 return -EFAULT;
31336 } else {
31337 memcpy(buf, dp, left);
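
The b1_load_t4file()/b1_load_config() hunks above add a length check in front of copy_from_user() so that a "left" value larger than the fixed on-stack buffer is rejected with -EFAULT instead of overflowing the buffer. The guarded-copy idiom, shown here with a hypothetical helper and buffer size:

    #include <linux/uaccess.h>

    /* Hypothetical helper showing the idiom: refuse the copy if it would not
     * fit in the destination buffer, or if the user access itself faults. */
    static int copy_chunk_from_user(const void __user *src, size_t left)
    {
            unsigned char buf[256];

            if (left > sizeof(buf) || copy_from_user(buf, src, left))
                    return -EFAULT;
            /* ... consume buf[0..left) ... */
            return 0;
    }
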
31338diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c
31339--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31340+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31341@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31342 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31343 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31344
31345+ pax_track_stack();
31346
31347 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31348 {
31349diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c
31350--- linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31351+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31352@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31353 IDI_SYNC_REQ req;
31354 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31355
31356+ pax_track_stack();
31357+
31358 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31359
31360 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31361diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c
31362--- linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31363+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31364@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31365 IDI_SYNC_REQ req;
31366 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31367
31368+ pax_track_stack();
31369+
31370 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31371
31372 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31373diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c
31374--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31375+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31376@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31377 IDI_SYNC_REQ req;
31378 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31379
31380+ pax_track_stack();
31381+
31382 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31383
31384 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31385diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h
31386--- linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31387+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31388@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31389 } diva_didd_add_adapter_t;
31390 typedef struct _diva_didd_remove_adapter {
31391 IDI_CALL p_request;
31392-} diva_didd_remove_adapter_t;
31393+} __no_const diva_didd_remove_adapter_t;
31394 typedef struct _diva_didd_read_adapter_array {
31395 void * buffer;
31396 dword length;
31397diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c
31398--- linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31399+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31400@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31401 IDI_SYNC_REQ req;
31402 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31403
31404+ pax_track_stack();
31405+
31406 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31407
31408 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31409diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c
31410--- linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31411+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31412@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31413 dword d;
31414 word w;
31415
31416+ pax_track_stack();
31417+
31418 a = plci->adapter;
31419 Id = ((word)plci->Id<<8)|a->Id;
31420 PUT_WORD(&SS_Ind[4],0x0000);
31421@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31422 word j, n, w;
31423 dword d;
31424
31425+ pax_track_stack();
31426+
31427
31428 for(i=0;i<8;i++) bp_parms[i].length = 0;
31429 for(i=0;i<2;i++) global_config[i].length = 0;
31430@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31431 const byte llc3[] = {4,3,2,2,6,6,0};
31432 const byte header[] = {0,2,3,3,0,0,0};
31433
31434+ pax_track_stack();
31435+
31436 for(i=0;i<8;i++) bp_parms[i].length = 0;
31437 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31438 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31439@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31440 word appl_number_group_type[MAX_APPL];
31441 PLCI *auxplci;
31442
31443+ pax_track_stack();
31444+
31445 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31446
31447 if(!a->group_optimization_enabled)
31448diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c
31449--- linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31450+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31451@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31452 IDI_SYNC_REQ req;
31453 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31454
31455+ pax_track_stack();
31456+
31457 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31458
31459 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31460diff -urNp linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h
31461--- linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31462+++ linux-2.6.32.45/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31463@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31464 typedef struct _diva_os_idi_adapter_interface {
31465 diva_init_card_proc_t cleanup_adapter_proc;
31466 diva_cmd_card_proc_t cmd_proc;
31467-} diva_os_idi_adapter_interface_t;
31468+} __no_const diva_os_idi_adapter_interface_t;
31469
31470 typedef struct _diva_os_xdi_adapter {
31471 struct list_head link;
31472diff -urNp linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c
31473--- linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31474+++ linux-2.6.32.45/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31475@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31476 } iocpar;
31477 void __user *argp = (void __user *)arg;
31478
31479+ pax_track_stack();
31480+
31481 #define name iocpar.name
31482 #define bname iocpar.bname
31483 #define iocts iocpar.iocts
31484diff -urNp linux-2.6.32.45/drivers/isdn/icn/icn.c linux-2.6.32.45/drivers/isdn/icn/icn.c
31485--- linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31486+++ linux-2.6.32.45/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31487@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31488 if (count > len)
31489 count = len;
31490 if (user) {
31491- if (copy_from_user(msg, buf, count))
31492+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31493 return -EFAULT;
31494 } else
31495 memcpy(msg, buf, count);
31496diff -urNp linux-2.6.32.45/drivers/isdn/mISDN/socket.c linux-2.6.32.45/drivers/isdn/mISDN/socket.c
31497--- linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31498+++ linux-2.6.32.45/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31499@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31500 if (dev) {
31501 struct mISDN_devinfo di;
31502
31503+ memset(&di, 0, sizeof(di));
31504 di.id = dev->id;
31505 di.Dprotocols = dev->Dprotocols;
31506 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31507@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31508 if (dev) {
31509 struct mISDN_devinfo di;
31510
31511+ memset(&di, 0, sizeof(di));
31512 di.id = dev->id;
31513 di.Dprotocols = dev->Dprotocols;
31514 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
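
The two socket.c hunks above zero the on-stack struct mISDN_devinfo before its fields are filled in. Without the memset, structure padding and any member the ioctl path leaves unset would reach userspace carrying stale kernel stack contents, a classic infoleak. The general shape of the fix, as a sketch with hypothetical types rather than the driver's exact code:

    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct devinfo {                 /* hypothetical stand-in for struct mISDN_devinfo */
            int id;
            unsigned int protocols;
    };

    static int fill_devinfo(void __user *argp, int id, unsigned int protocols)
    {
            struct devinfo di;

            memset(&di, 0, sizeof(di));      /* clear padding and unset members */
            di.id = id;
            di.protocols = protocols;
            return copy_to_user(argp, &di, sizeof(di)) ? -EFAULT : 0;
    }
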
31515diff -urNp linux-2.6.32.45/drivers/isdn/sc/interrupt.c linux-2.6.32.45/drivers/isdn/sc/interrupt.c
31516--- linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31517+++ linux-2.6.32.45/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31518@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31519 }
31520 else if(callid>=0x0000 && callid<=0x7FFF)
31521 {
31522+ int len;
31523+
31524 pr_debug("%s: Got Incoming Call\n",
31525 sc_adapter[card]->devicename);
31526- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31527- strcpy(setup.eazmsn,
31528- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31529+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31530+ sizeof(setup.phone));
31531+ if (len >= sizeof(setup.phone))
31532+ continue;
31533+ len = strlcpy(setup.eazmsn,
31534+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31535+ sizeof(setup.eazmsn));
31536+ if (len >= sizeof(setup.eazmsn))
31537+ continue;
31538 setup.si1 = 7;
31539 setup.si2 = 0;
31540 setup.plan = 0;
31541@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31542 * Handle a GetMyNumber Rsp
31543 */
31544 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31545- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31546+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31547+ rcvmsg.msg_data.byte_array,
31548+ sizeof(rcvmsg.msg_data.byte_array));
31549 continue;
31550 }
31551
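
The interrupt_handler() hunks above replace unbounded strcpy() calls into the fixed-size setup.phone and setup.eazmsn buffers with strlcpy(), and skip the message when the return value reports truncation. strlcpy() always NUL-terminates and returns the length of the source string, so a result greater than or equal to the destination size means the source did not fit. A small sketch of that check:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Returns true only when src (including its terminating NUL) fit in dst. */
    static bool copy_number(char *dst, size_t dst_size, const char *src)
    {
            return strlcpy(dst, src, dst_size) < dst_size;
    }
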
31552diff -urNp linux-2.6.32.45/drivers/lguest/core.c linux-2.6.32.45/drivers/lguest/core.c
31553--- linux-2.6.32.45/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31554+++ linux-2.6.32.45/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31555@@ -91,9 +91,17 @@ static __init int map_switcher(void)
31556 * it's worked so far. The end address needs +1 because __get_vm_area
31557 * allocates an extra guard page, so we need space for that.
31558 */
31559+
31560+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31561+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31562+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31563+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31564+#else
31565 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31566 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31567 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31568+#endif
31569+
31570 if (!switcher_vma) {
31571 err = -ENOMEM;
31572 printk("lguest: could not map switcher pages high\n");
31573@@ -118,7 +126,7 @@ static __init int map_switcher(void)
31574 * Now the Switcher is mapped at the right address, we can't fail!
31575 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31576 */
31577- memcpy(switcher_vma->addr, start_switcher_text,
31578+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31579 end_switcher_text - start_switcher_text);
31580
31581 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31582diff -urNp linux-2.6.32.45/drivers/lguest/x86/core.c linux-2.6.32.45/drivers/lguest/x86/core.c
31583--- linux-2.6.32.45/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31584+++ linux-2.6.32.45/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31585@@ -59,7 +59,7 @@ static struct {
31586 /* Offset from where switcher.S was compiled to where we've copied it */
31587 static unsigned long switcher_offset(void)
31588 {
31589- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31590+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31591 }
31592
31593 /* This cpu's struct lguest_pages. */
31594@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31595 * These copies are pretty cheap, so we do them unconditionally: */
31596 /* Save the current Host top-level page directory.
31597 */
31598+
31599+#ifdef CONFIG_PAX_PER_CPU_PGD
31600+ pages->state.host_cr3 = read_cr3();
31601+#else
31602 pages->state.host_cr3 = __pa(current->mm->pgd);
31603+#endif
31604+
31605 /*
31606 * Set up the Guest's page tables to see this CPU's pages (and no
31607 * other CPU's pages).
31608@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31609 * compiled-in switcher code and the high-mapped copy we just made.
31610 */
31611 for (i = 0; i < IDT_ENTRIES; i++)
31612- default_idt_entries[i] += switcher_offset();
31613+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31614
31615 /*
31616 * Set up the Switcher's per-cpu areas.
31617@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31618 * it will be undisturbed when we switch. To change %cs and jump we
31619 * need this structure to feed to Intel's "lcall" instruction.
31620 */
31621- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31622+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31623 lguest_entry.segment = LGUEST_CS;
31624
31625 /*
31626diff -urNp linux-2.6.32.45/drivers/lguest/x86/switcher_32.S linux-2.6.32.45/drivers/lguest/x86/switcher_32.S
31627--- linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31628+++ linux-2.6.32.45/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31629@@ -87,6 +87,7 @@
31630 #include <asm/page.h>
31631 #include <asm/segment.h>
31632 #include <asm/lguest.h>
31633+#include <asm/processor-flags.h>
31634
31635 // We mark the start of the code to copy
31636 // It's placed in .text tho it's never run here
31637@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31638 // Changes type when we load it: damn Intel!
31639 // For after we switch over our page tables
31640 // That entry will be read-only: we'd crash.
31641+
31642+#ifdef CONFIG_PAX_KERNEXEC
31643+ mov %cr0, %edx
31644+ xor $X86_CR0_WP, %edx
31645+ mov %edx, %cr0
31646+#endif
31647+
31648 movl $(GDT_ENTRY_TSS*8), %edx
31649 ltr %dx
31650
31651@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31652 // Let's clear it again for our return.
31653 // The GDT descriptor of the Host
31654 // Points to the table after two "size" bytes
31655- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31656+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31657 // Clear "used" from type field (byte 5, bit 2)
31658- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31659+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31660+
31661+#ifdef CONFIG_PAX_KERNEXEC
31662+ mov %cr0, %eax
31663+ xor $X86_CR0_WP, %eax
31664+ mov %eax, %cr0
31665+#endif
31666
31667 // Once our page table's switched, the Guest is live!
31668 // The Host fades as we run this final step.
31669@@ -295,13 +309,12 @@ deliver_to_host:
31670 // I consulted gcc, and it gave
31671 // These instructions, which I gladly credit:
31672 leal (%edx,%ebx,8), %eax
31673- movzwl (%eax),%edx
31674- movl 4(%eax), %eax
31675- xorw %ax, %ax
31676- orl %eax, %edx
31677+ movl 4(%eax), %edx
31678+ movw (%eax), %dx
31679 // Now the address of the handler's in %edx
31680 // We call it now: its "iret" drops us home.
31681- jmp *%edx
31682+ ljmp $__KERNEL_CS, $1f
31683+1: jmp *%edx
31684
31685 // Every interrupt can come to us here
31686 // But we must truly tell each apart.
31687diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c
31688--- linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31689+++ linux-2.6.32.45/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31690@@ -15,7 +15,7 @@
31691
31692 #define MAX_PMU_LEVEL 0xFF
31693
31694-static struct backlight_ops pmu_backlight_data;
31695+static const struct backlight_ops pmu_backlight_data;
31696 static DEFINE_SPINLOCK(pmu_backlight_lock);
31697 static int sleeping, uses_pmu_bl;
31698 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31699@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31700 return bd->props.brightness;
31701 }
31702
31703-static struct backlight_ops pmu_backlight_data = {
31704+static const struct backlight_ops pmu_backlight_data = {
31705 .get_brightness = pmu_backlight_get_brightness,
31706 .update_status = pmu_backlight_update_status,
31707
31708diff -urNp linux-2.6.32.45/drivers/macintosh/via-pmu.c linux-2.6.32.45/drivers/macintosh/via-pmu.c
31709--- linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31710+++ linux-2.6.32.45/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31711@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31712 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31713 }
31714
31715-static struct platform_suspend_ops pmu_pm_ops = {
31716+static const struct platform_suspend_ops pmu_pm_ops = {
31717 .enter = powerbook_sleep,
31718 .valid = pmu_sleep_valid,
31719 };
31720diff -urNp linux-2.6.32.45/drivers/md/dm.c linux-2.6.32.45/drivers/md/dm.c
31721--- linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:35:29.000000000 -0400
31722+++ linux-2.6.32.45/drivers/md/dm.c 2011-08-09 18:33:59.000000000 -0400
31723@@ -165,9 +165,9 @@ struct mapped_device {
31724 /*
31725 * Event handling.
31726 */
31727- atomic_t event_nr;
31728+ atomic_unchecked_t event_nr;
31729 wait_queue_head_t eventq;
31730- atomic_t uevent_seq;
31731+ atomic_unchecked_t uevent_seq;
31732 struct list_head uevent_list;
31733 spinlock_t uevent_lock; /* Protect access to uevent_list */
31734
31735@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i
31736 rwlock_init(&md->map_lock);
31737 atomic_set(&md->holders, 1);
31738 atomic_set(&md->open_count, 0);
31739- atomic_set(&md->event_nr, 0);
31740- atomic_set(&md->uevent_seq, 0);
31741+ atomic_set_unchecked(&md->event_nr, 0);
31742+ atomic_set_unchecked(&md->uevent_seq, 0);
31743 INIT_LIST_HEAD(&md->uevent_list);
31744 spin_lock_init(&md->uevent_lock);
31745
31746@@ -1927,7 +1927,7 @@ static void event_callback(void *context
31747
31748 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31749
31750- atomic_inc(&md->event_nr);
31751+ atomic_inc_unchecked(&md->event_nr);
31752 wake_up(&md->eventq);
31753 }
31754
31755@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev
31756
31757 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31758 {
31759- return atomic_add_return(1, &md->uevent_seq);
31760+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31761 }
31762
31763 uint32_t dm_get_event_nr(struct mapped_device *md)
31764 {
31765- return atomic_read(&md->event_nr);
31766+ return atomic_read_unchecked(&md->event_nr);
31767 }
31768
31769 int dm_wait_event(struct mapped_device *md, int event_nr)
31770 {
31771 return wait_event_interruptible(md->eventq,
31772- (event_nr != atomic_read(&md->event_nr)));
31773+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31774 }
31775
31776 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31777diff -urNp linux-2.6.32.45/drivers/md/dm-ioctl.c linux-2.6.32.45/drivers/md/dm-ioctl.c
31778--- linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31779+++ linux-2.6.32.45/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31780@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31781 cmd == DM_LIST_VERSIONS_CMD)
31782 return 0;
31783
31784- if ((cmd == DM_DEV_CREATE_CMD)) {
31785+ if (cmd == DM_DEV_CREATE_CMD) {
31786 if (!*param->name) {
31787 DMWARN("name not supplied when creating device");
31788 return -EINVAL;
31789diff -urNp linux-2.6.32.45/drivers/md/dm-raid1.c linux-2.6.32.45/drivers/md/dm-raid1.c
31790--- linux-2.6.32.45/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31791+++ linux-2.6.32.45/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31792@@ -41,7 +41,7 @@ enum dm_raid1_error {
31793
31794 struct mirror {
31795 struct mirror_set *ms;
31796- atomic_t error_count;
31797+ atomic_unchecked_t error_count;
31798 unsigned long error_type;
31799 struct dm_dev *dev;
31800 sector_t offset;
31801@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31802 * simple way to tell if a device has encountered
31803 * errors.
31804 */
31805- atomic_inc(&m->error_count);
31806+ atomic_inc_unchecked(&m->error_count);
31807
31808 if (test_and_set_bit(error_type, &m->error_type))
31809 return;
31810@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31811 }
31812
31813 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31814- if (!atomic_read(&new->error_count)) {
31815+ if (!atomic_read_unchecked(&new->error_count)) {
31816 set_default_mirror(new);
31817 break;
31818 }
31819@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31820 struct mirror *m = get_default_mirror(ms);
31821
31822 do {
31823- if (likely(!atomic_read(&m->error_count)))
31824+ if (likely(!atomic_read_unchecked(&m->error_count)))
31825 return m;
31826
31827 if (m-- == ms->mirror)
31828@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31829 {
31830 struct mirror *default_mirror = get_default_mirror(m->ms);
31831
31832- return !atomic_read(&default_mirror->error_count);
31833+ return !atomic_read_unchecked(&default_mirror->error_count);
31834 }
31835
31836 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31837@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31838 */
31839 if (likely(region_in_sync(ms, region, 1)))
31840 m = choose_mirror(ms, bio->bi_sector);
31841- else if (m && atomic_read(&m->error_count))
31842+ else if (m && atomic_read_unchecked(&m->error_count))
31843 m = NULL;
31844
31845 if (likely(m))
31846@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31847 }
31848
31849 ms->mirror[mirror].ms = ms;
31850- atomic_set(&(ms->mirror[mirror].error_count), 0);
31851+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31852 ms->mirror[mirror].error_type = 0;
31853 ms->mirror[mirror].offset = offset;
31854
31855@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31856 */
31857 static char device_status_char(struct mirror *m)
31858 {
31859- if (!atomic_read(&(m->error_count)))
31860+ if (!atomic_read_unchecked(&(m->error_count)))
31861 return 'A';
31862
31863 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31864diff -urNp linux-2.6.32.45/drivers/md/dm-stripe.c linux-2.6.32.45/drivers/md/dm-stripe.c
31865--- linux-2.6.32.45/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31866+++ linux-2.6.32.45/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31867@@ -20,7 +20,7 @@ struct stripe {
31868 struct dm_dev *dev;
31869 sector_t physical_start;
31870
31871- atomic_t error_count;
31872+ atomic_unchecked_t error_count;
31873 };
31874
31875 struct stripe_c {
31876@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31877 kfree(sc);
31878 return r;
31879 }
31880- atomic_set(&(sc->stripe[i].error_count), 0);
31881+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31882 }
31883
31884 ti->private = sc;
31885@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31886 DMEMIT("%d ", sc->stripes);
31887 for (i = 0; i < sc->stripes; i++) {
31888 DMEMIT("%s ", sc->stripe[i].dev->name);
31889- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31890+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31891 'D' : 'A';
31892 }
31893 buffer[i] = '\0';
31894@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31895 */
31896 for (i = 0; i < sc->stripes; i++)
31897 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31898- atomic_inc(&(sc->stripe[i].error_count));
31899- if (atomic_read(&(sc->stripe[i].error_count)) <
31900+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31901+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31902 DM_IO_ERROR_THRESHOLD)
31903 queue_work(kstriped, &sc->kstriped_ws);
31904 }
31905diff -urNp linux-2.6.32.45/drivers/md/dm-sysfs.c linux-2.6.32.45/drivers/md/dm-sysfs.c
31906--- linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31907+++ linux-2.6.32.45/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31908@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31909 NULL,
31910 };
31911
31912-static struct sysfs_ops dm_sysfs_ops = {
31913+static const struct sysfs_ops dm_sysfs_ops = {
31914 .show = dm_attr_show,
31915 };
31916
31917diff -urNp linux-2.6.32.45/drivers/md/dm-table.c linux-2.6.32.45/drivers/md/dm-table.c
31918--- linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31919+++ linux-2.6.32.45/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31920@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31921 if (!dev_size)
31922 return 0;
31923
31924- if ((start >= dev_size) || (start + len > dev_size)) {
31925+ if ((start >= dev_size) || (len > dev_size - start)) {
31926 DMWARN("%s: %s too small for target: "
31927 "start=%llu, len=%llu, dev_size=%llu",
31928 dm_device_name(ti->table->md), bdevname(bdev, b),
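
The device_area_is_invalid() hunk above rewrites "start + len > dev_size" as "len > dev_size - start". With a very large len the original sum can wrap around and make an out-of-range area look valid; the rewritten form cannot overflow, because the short-circuited "start >= dev_size" test guarantees the subtraction never underflows. The same rule as a standalone sketch, with unsigned long long standing in for sector_t:

    /* Returns nonzero when [start, start + len) does not fit inside dev_size. */
    static int range_is_invalid(unsigned long long start,
                                unsigned long long len,
                                unsigned long long dev_size)
    {
            return start >= dev_size || len > dev_size - start;
    }
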
31929diff -urNp linux-2.6.32.45/drivers/md/md.c linux-2.6.32.45/drivers/md/md.c
31930--- linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31931+++ linux-2.6.32.45/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31932@@ -153,10 +153,10 @@ static int start_readonly;
31933 * start build, activate spare
31934 */
31935 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31936-static atomic_t md_event_count;
31937+static atomic_unchecked_t md_event_count;
31938 void md_new_event(mddev_t *mddev)
31939 {
31940- atomic_inc(&md_event_count);
31941+ atomic_inc_unchecked(&md_event_count);
31942 wake_up(&md_event_waiters);
31943 }
31944 EXPORT_SYMBOL_GPL(md_new_event);
31945@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31946 */
31947 static void md_new_event_inintr(mddev_t *mddev)
31948 {
31949- atomic_inc(&md_event_count);
31950+ atomic_inc_unchecked(&md_event_count);
31951 wake_up(&md_event_waiters);
31952 }
31953
31954@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31955
31956 rdev->preferred_minor = 0xffff;
31957 rdev->data_offset = le64_to_cpu(sb->data_offset);
31958- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31959+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31960
31961 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31962 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31963@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31964 else
31965 sb->resync_offset = cpu_to_le64(0);
31966
31967- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31968+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31969
31970 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31971 sb->size = cpu_to_le64(mddev->dev_sectors);
31972@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31973 static ssize_t
31974 errors_show(mdk_rdev_t *rdev, char *page)
31975 {
31976- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31977+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31978 }
31979
31980 static ssize_t
31981@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31982 char *e;
31983 unsigned long n = simple_strtoul(buf, &e, 10);
31984 if (*buf && (*e == 0 || *e == '\n')) {
31985- atomic_set(&rdev->corrected_errors, n);
31986+ atomic_set_unchecked(&rdev->corrected_errors, n);
31987 return len;
31988 }
31989 return -EINVAL;
31990@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31991 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31992 kfree(rdev);
31993 }
31994-static struct sysfs_ops rdev_sysfs_ops = {
31995+static const struct sysfs_ops rdev_sysfs_ops = {
31996 .show = rdev_attr_show,
31997 .store = rdev_attr_store,
31998 };
31999@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32000 rdev->data_offset = 0;
32001 rdev->sb_events = 0;
32002 atomic_set(&rdev->nr_pending, 0);
32003- atomic_set(&rdev->read_errors, 0);
32004- atomic_set(&rdev->corrected_errors, 0);
32005+ atomic_set_unchecked(&rdev->read_errors, 0);
32006+ atomic_set_unchecked(&rdev->corrected_errors, 0);
32007
32008 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32009 if (!size) {
32010@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32011 kfree(mddev);
32012 }
32013
32014-static struct sysfs_ops md_sysfs_ops = {
32015+static const struct sysfs_ops md_sysfs_ops = {
32016 .show = md_attr_show,
32017 .store = md_attr_store,
32018 };
32019@@ -4474,7 +4474,8 @@ out:
32020 err = 0;
32021 blk_integrity_unregister(disk);
32022 md_new_event(mddev);
32023- sysfs_notify_dirent(mddev->sysfs_state);
32024+ if (mddev->sysfs_state)
32025+ sysfs_notify_dirent(mddev->sysfs_state);
32026 return err;
32027 }
32028
32029@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32030
32031 spin_unlock(&pers_lock);
32032 seq_printf(seq, "\n");
32033- mi->event = atomic_read(&md_event_count);
32034+ mi->event = atomic_read_unchecked(&md_event_count);
32035 return 0;
32036 }
32037 if (v == (void*)2) {
32038@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32039 chunk_kb ? "KB" : "B");
32040 if (bitmap->file) {
32041 seq_printf(seq, ", file: ");
32042- seq_path(seq, &bitmap->file->f_path, " \t\n");
32043+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32044 }
32045
32046 seq_printf(seq, "\n");
32047@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32048 else {
32049 struct seq_file *p = file->private_data;
32050 p->private = mi;
32051- mi->event = atomic_read(&md_event_count);
32052+ mi->event = atomic_read_unchecked(&md_event_count);
32053 }
32054 return error;
32055 }
32056@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32057 /* always allow read */
32058 mask = POLLIN | POLLRDNORM;
32059
32060- if (mi->event != atomic_read(&md_event_count))
32061+ if (mi->event != atomic_read_unchecked(&md_event_count))
32062 mask |= POLLERR | POLLPRI;
32063 return mask;
32064 }
32065@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32066 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32067 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32068 (int)part_stat_read(&disk->part0, sectors[1]) -
32069- atomic_read(&disk->sync_io);
32070+ atomic_read_unchecked(&disk->sync_io);
32071 /* sync IO will cause sync_io to increase before the disk_stats
32072 * as sync_io is counted when a request starts, and
32073 * disk_stats is counted when it completes.
32074diff -urNp linux-2.6.32.45/drivers/md/md.h linux-2.6.32.45/drivers/md/md.h
32075--- linux-2.6.32.45/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32076+++ linux-2.6.32.45/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32077@@ -94,10 +94,10 @@ struct mdk_rdev_s
32078 * only maintained for arrays that
32079 * support hot removal
32080 */
32081- atomic_t read_errors; /* number of consecutive read errors that
32082+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
32083 * we have tried to ignore.
32084 */
32085- atomic_t corrected_errors; /* number of corrected read errors,
32086+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32087 * for reporting to userspace and storing
32088 * in superblock.
32089 */
32090@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32091
32092 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32093 {
32094- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32095+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32096 }
32097
32098 struct mdk_personality
32099diff -urNp linux-2.6.32.45/drivers/md/raid10.c linux-2.6.32.45/drivers/md/raid10.c
32100--- linux-2.6.32.45/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32101+++ linux-2.6.32.45/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32102@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32103 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32104 set_bit(R10BIO_Uptodate, &r10_bio->state);
32105 else {
32106- atomic_add(r10_bio->sectors,
32107+ atomic_add_unchecked(r10_bio->sectors,
32108 &conf->mirrors[d].rdev->corrected_errors);
32109 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32110 md_error(r10_bio->mddev,
32111@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32112 test_bit(In_sync, &rdev->flags)) {
32113 atomic_inc(&rdev->nr_pending);
32114 rcu_read_unlock();
32115- atomic_add(s, &rdev->corrected_errors);
32116+ atomic_add_unchecked(s, &rdev->corrected_errors);
32117 if (sync_page_io(rdev->bdev,
32118 r10_bio->devs[sl].addr +
32119 sect + rdev->data_offset,
32120diff -urNp linux-2.6.32.45/drivers/md/raid1.c linux-2.6.32.45/drivers/md/raid1.c
32121--- linux-2.6.32.45/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32122+++ linux-2.6.32.45/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32123@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32124 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32125 continue;
32126 rdev = conf->mirrors[d].rdev;
32127- atomic_add(s, &rdev->corrected_errors);
32128+ atomic_add_unchecked(s, &rdev->corrected_errors);
32129 if (sync_page_io(rdev->bdev,
32130 sect + rdev->data_offset,
32131 s<<9,
32132@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32133 /* Well, this device is dead */
32134 md_error(mddev, rdev);
32135 else {
32136- atomic_add(s, &rdev->corrected_errors);
32137+ atomic_add_unchecked(s, &rdev->corrected_errors);
32138 printk(KERN_INFO
32139 "raid1:%s: read error corrected "
32140 "(%d sectors at %llu on %s)\n",
32141diff -urNp linux-2.6.32.45/drivers/md/raid5.c linux-2.6.32.45/drivers/md/raid5.c
32142--- linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32143+++ linux-2.6.32.45/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32144@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32145 bi->bi_next = NULL;
32146 if ((rw & WRITE) &&
32147 test_bit(R5_ReWrite, &sh->dev[i].flags))
32148- atomic_add(STRIPE_SECTORS,
32149+ atomic_add_unchecked(STRIPE_SECTORS,
32150 &rdev->corrected_errors);
32151 generic_make_request(bi);
32152 } else {
32153@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32154 clear_bit(R5_ReadError, &sh->dev[i].flags);
32155 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32156 }
32157- if (atomic_read(&conf->disks[i].rdev->read_errors))
32158- atomic_set(&conf->disks[i].rdev->read_errors, 0);
32159+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32160+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32161 } else {
32162 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32163 int retry = 0;
32164 rdev = conf->disks[i].rdev;
32165
32166 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32167- atomic_inc(&rdev->read_errors);
32168+ atomic_inc_unchecked(&rdev->read_errors);
32169 if (conf->mddev->degraded >= conf->max_degraded)
32170 printk_rl(KERN_WARNING
32171 "raid5:%s: read error not correctable "
32172@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32173 (unsigned long long)(sh->sector
32174 + rdev->data_offset),
32175 bdn);
32176- else if (atomic_read(&rdev->read_errors)
32177+ else if (atomic_read_unchecked(&rdev->read_errors)
32178 > conf->max_nr_stripes)
32179 printk(KERN_WARNING
32180 "raid5:%s: Too many read errors, failing device %s.\n",
32181@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32182 sector_t r_sector;
32183 struct stripe_head sh2;
32184
32185+ pax_track_stack();
32186
32187 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32188 stripe = new_sector;
32189diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_fops.c linux-2.6.32.45/drivers/media/common/saa7146_fops.c
32190--- linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
32191+++ linux-2.6.32.45/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
32192@@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
32193 ERR(("out of memory. aborting.\n"));
32194 return -ENOMEM;
32195 }
32196- ext_vv->ops = saa7146_video_ioctl_ops;
32197+ memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32198 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32199
32200 DEB_EE(("dev:%p\n",dev));
32201diff -urNp linux-2.6.32.45/drivers/media/common/saa7146_hlp.c linux-2.6.32.45/drivers/media/common/saa7146_hlp.c
32202--- linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32203+++ linux-2.6.32.45/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32204@@ -353,6 +353,8 @@ static void calculate_clipping_registers
32205
32206 int x[32], y[32], w[32], h[32];
32207
32208+ pax_track_stack();
32209+
32210 /* clear out memory */
32211 memset(&line_list[0], 0x00, sizeof(u32)*32);
32212 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
32213diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32214--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32215+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32216@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32217 u8 buf[HOST_LINK_BUF_SIZE];
32218 int i;
32219
32220+ pax_track_stack();
32221+
32222 dprintk("%s\n", __func__);
32223
32224 /* check if we have space for a link buf in the rx_buffer */
32225@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32226 unsigned long timeout;
32227 int written;
32228
32229+ pax_track_stack();
32230+
32231 dprintk("%s\n", __func__);
32232
32233 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32234diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h
32235--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32236+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32237@@ -71,7 +71,7 @@ struct dvb_demux_feed {
32238 union {
32239 dmx_ts_cb ts;
32240 dmx_section_cb sec;
32241- } cb;
32242+ } __no_const cb;
32243
32244 struct dvb_demux *demux;
32245 void *priv;
32246diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c
32247--- linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32248+++ linux-2.6.32.45/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32249@@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32250 dvbdev->fops = dvbdevfops;
32251 init_waitqueue_head (&dvbdev->wait_queue);
32252
32253- memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32254- dvbdevfops->owner = adap->module;
32255+ memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32256+ *(void **)&dvbdevfops->owner = adap->module;
32257
32258 list_add_tail (&dvbdev->list_head, &adap->device_list);
32259
32260diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c
32261--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32262+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32263@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32264 struct dib0700_adapter_state {
32265 int (*set_param_save) (struct dvb_frontend *,
32266 struct dvb_frontend_parameters *);
32267-};
32268+} __no_const;
32269
32270 static int dib7070_set_param_override(struct dvb_frontend *fe,
32271 struct dvb_frontend_parameters *fep)
32272diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c
32273--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32274+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32275@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32276
32277 u8 buf[260];
32278
32279+ pax_track_stack();
32280+
32281 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32282 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32283
32284diff -urNp linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c
32285--- linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32286+++ linux-2.6.32.45/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32287@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32288
32289 struct dib0700_adapter_state {
32290 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32291-};
32292+} __no_const;
32293
32294 /* Hauppauge Nova-T 500 (aka Bristol)
32295 * has a LNA on GPIO0 which is enabled by setting 1 */
32296diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h
32297--- linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32298+++ linux-2.6.32.45/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32299@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32300 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32301 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32302 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32303-};
32304+} __no_const;
32305
32306 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32307 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
32308diff -urNp linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c
32309--- linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32310+++ linux-2.6.32.45/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32311@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32312 u8 tudata[585];
32313 int i;
32314
32315+ pax_track_stack();
32316+
32317 dprintk("Firmware is %zd bytes\n",fw->size);
32318
32319 /* Get eprom data */
32320diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c
32321--- linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32322+++ linux-2.6.32.45/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32323@@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32324 ERR(("cannot init capture device. skipping.\n"));
32325 return -ENODEV;
32326 }
32327- vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32328- vv_data->ops.vidioc_g_input = vidioc_g_input;
32329- vv_data->ops.vidioc_s_input = vidioc_s_input;
32330- vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32331- vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32332- vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32333- vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32334- vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32335- vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32336- vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32337- vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32338- vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32339+ *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32340+ *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32341+ *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32342+ *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32343+ *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32344+ *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32345+ *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32346+ *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32347+ *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32348+ *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32349+ *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32350+ *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32351
32352 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32353 ERR(("cannot register capture device. skipping.\n"));
32354diff -urNp linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c
32355--- linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32356+++ linux-2.6.32.45/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32357@@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32358 ERR(("cannot init vv subsystem.\n"));
32359 return err;
32360 }
32361- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32362- vv_data.ops.vidioc_g_input = vidioc_g_input;
32363- vv_data.ops.vidioc_s_input = vidioc_s_input;
32364+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32365+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32366+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32367
32368 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32369 /* fixme: proper cleanup here */
32370diff -urNp linux-2.6.32.45/drivers/media/radio/radio-cadet.c linux-2.6.32.45/drivers/media/radio/radio-cadet.c
32371--- linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32372+++ linux-2.6.32.45/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32373@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32374 while (i < count && dev->rdsin != dev->rdsout)
32375 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32376
32377- if (copy_to_user(data, readbuf, i))
32378+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32379 return -EFAULT;
32380 return i;
32381 }
32382diff -urNp linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c
32383--- linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32384+++ linux-2.6.32.45/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32385@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32386
32387 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32388
32389-static atomic_t cx18_instance = ATOMIC_INIT(0);
32390+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32391
32392 /* Parameter declarations */
32393 static int cardtype[CX18_MAX_CARDS];
32394@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32395 struct i2c_client c;
32396 u8 eedata[256];
32397
32398+ pax_track_stack();
32399+
32400 memset(&c, 0, sizeof(c));
32401 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32402 c.adapter = &cx->i2c_adap[0];
32403@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32404 struct cx18 *cx;
32405
32406 /* FIXME - module parameter arrays constrain max instances */
32407- i = atomic_inc_return(&cx18_instance) - 1;
32408+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32409 if (i >= CX18_MAX_CARDS) {
32410 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32411 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32412diff -urNp linux-2.6.32.45/drivers/media/video/hexium_gemini.c linux-2.6.32.45/drivers/media/video/hexium_gemini.c
32413--- linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32414+++ linux-2.6.32.45/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32415@@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32416 hexium->cur_input = 0;
32417
32418 saa7146_vv_init(dev, &vv_data);
32419- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32420- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32421- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32422- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32423- vv_data.ops.vidioc_g_input = vidioc_g_input;
32424- vv_data.ops.vidioc_s_input = vidioc_s_input;
32425+ *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32426+ *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32427+ *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32428+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32429+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32430+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32431 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32432 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32433 return -1;
32434diff -urNp linux-2.6.32.45/drivers/media/video/hexium_orion.c linux-2.6.32.45/drivers/media/video/hexium_orion.c
32435--- linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32436+++ linux-2.6.32.45/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32437@@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32438 DEB_EE((".\n"));
32439
32440 saa7146_vv_init(dev, &vv_data);
32441- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32442- vv_data.ops.vidioc_g_input = vidioc_g_input;
32443- vv_data.ops.vidioc_s_input = vidioc_s_input;
32444+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32445+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32446+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32447 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32448 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32449 return -1;
32450diff -urNp linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c
32451--- linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32452+++ linux-2.6.32.45/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32453@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32454 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32455
32456 /* ivtv instance counter */
32457-static atomic_t ivtv_instance = ATOMIC_INIT(0);
32458+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32459
32460 /* Parameter declarations */
32461 static int cardtype[IVTV_MAX_CARDS];
32462diff -urNp linux-2.6.32.45/drivers/media/video/mxb.c linux-2.6.32.45/drivers/media/video/mxb.c
32463--- linux-2.6.32.45/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32464+++ linux-2.6.32.45/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32465@@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32466 already did this in "mxb_vl42_probe" */
32467
32468 saa7146_vv_init(dev, &vv_data);
32469- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32470- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32471- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32472- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32473- vv_data.ops.vidioc_g_input = vidioc_g_input;
32474- vv_data.ops.vidioc_s_input = vidioc_s_input;
32475- vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32476- vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32477- vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32478- vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32479- vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32480- vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32481+ *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32482+ *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32483+ *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32484+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32485+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32486+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32487+ *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32488+ *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32489+ *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32490+ *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32491+ *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32492+ *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32493 #ifdef CONFIG_VIDEO_ADV_DEBUG
32494- vv_data.ops.vidioc_g_register = vidioc_g_register;
32495- vv_data.ops.vidioc_s_register = vidioc_s_register;
32496+ *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32497+ *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32498 #endif
32499- vv_data.ops.vidioc_default = vidioc_default;
32500+ *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32501 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32502 ERR(("cannot register capture v4l2 device. skipping.\n"));
32503 return -1;
32504diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.c linux-2.6.32.45/drivers/media/video/omap24xxcam.c
32505--- linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32506+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32507@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32508 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32509
32510 do_gettimeofday(&vb->ts);
32511- vb->field_count = atomic_add_return(2, &fh->field_count);
32512+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32513 if (csr & csr_error) {
32514 vb->state = VIDEOBUF_ERROR;
32515 if (!atomic_read(&fh->cam->in_reset)) {
32516diff -urNp linux-2.6.32.45/drivers/media/video/omap24xxcam.h linux-2.6.32.45/drivers/media/video/omap24xxcam.h
32517--- linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32518+++ linux-2.6.32.45/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32519@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32520 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32521 struct videobuf_queue vbq;
32522 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32523- atomic_t field_count; /* field counter for videobuf_buffer */
32524+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32525 /* accessing cam here doesn't need serialisation: it's constant */
32526 struct omap24xxcam_device *cam;
32527 };
32528diff -urNp linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32529--- linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32530+++ linux-2.6.32.45/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32531@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32532 u8 *eeprom;
32533 struct tveeprom tvdata;
32534
32535+ pax_track_stack();
32536+
32537 memset(&tvdata,0,sizeof(tvdata));
32538
32539 eeprom = pvr2_eeprom_fetch(hdw);
32540diff -urNp linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c
32541--- linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32542+++ linux-2.6.32.45/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32543@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32544 unsigned char localPAT[256];
32545 unsigned char localPMT[256];
32546
32547+ pax_track_stack();
32548+
32549 /* Set video format - must be done first as it resets other settings */
32550 set_reg8(client, 0x41, h->video_format);
32551
32552diff -urNp linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c
32553--- linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32554+++ linux-2.6.32.45/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32555@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32556 wait_queue_head_t *q = 0;
32557 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32558
32559+ pax_track_stack();
32560+
32561 /* While any outstand message on the bus exists... */
32562 do {
32563
32564@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32565 u8 tmp[512];
32566 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32567
32568+ pax_track_stack();
32569+
32570 while (loop) {
32571
32572 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32573diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c
32574--- linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32575+++ linux-2.6.32.45/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32576@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32577 static int __init ibmcam_init(void)
32578 {
32579 struct usbvideo_cb cbTbl;
32580- memset(&cbTbl, 0, sizeof(cbTbl));
32581- cbTbl.probe = ibmcam_probe;
32582- cbTbl.setupOnOpen = ibmcam_setup_on_open;
32583- cbTbl.videoStart = ibmcam_video_start;
32584- cbTbl.videoStop = ibmcam_video_stop;
32585- cbTbl.processData = ibmcam_ProcessIsocData;
32586- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32587- cbTbl.adjustPicture = ibmcam_adjust_picture;
32588- cbTbl.getFPS = ibmcam_calculate_fps;
32589+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32590+ *(void **)&cbTbl.probe = ibmcam_probe;
32591+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32592+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
32593+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32594+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32595+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32596+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32597+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32598 return usbvideo_register(
32599 &cams,
32600 MAX_IBMCAM,
32601diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c
32602--- linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32603+++ linux-2.6.32.45/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32604@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32605 int error;
32606
32607 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32608- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32609+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32610
32611 cam->input = input_dev = input_allocate_device();
32612 if (!input_dev) {
32613@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32614 struct usbvideo_cb cbTbl;
32615 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32616 DRIVER_DESC "\n");
32617- memset(&cbTbl, 0, sizeof(cbTbl));
32618- cbTbl.probe = konicawc_probe;
32619- cbTbl.setupOnOpen = konicawc_setup_on_open;
32620- cbTbl.processData = konicawc_process_isoc;
32621- cbTbl.getFPS = konicawc_calculate_fps;
32622- cbTbl.setVideoMode = konicawc_set_video_mode;
32623- cbTbl.startDataPump = konicawc_start_data;
32624- cbTbl.stopDataPump = konicawc_stop_data;
32625- cbTbl.adjustPicture = konicawc_adjust_picture;
32626- cbTbl.userFree = konicawc_free_uvd;
32627+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
32628+ *(void **)&cbTbl.probe = konicawc_probe;
32629+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32630+ *(void **)&cbTbl.processData = konicawc_process_isoc;
32631+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32632+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32633+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
32634+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32635+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32636+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
32637 return usbvideo_register(
32638 &cams,
32639 MAX_CAMERAS,
32640diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c
32641--- linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32642+++ linux-2.6.32.45/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32643@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32644 int error;
32645
32646 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32647- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32648+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32649
32650 cam->input = input_dev = input_allocate_device();
32651 if (!input_dev) {
32652diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c
32653--- linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32654+++ linux-2.6.32.45/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32655@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32656 {
32657 struct usbvideo_cb cbTbl;
32658 memset(&cbTbl, 0, sizeof(cbTbl));
32659- cbTbl.probe = ultracam_probe;
32660- cbTbl.setupOnOpen = ultracam_setup_on_open;
32661- cbTbl.videoStart = ultracam_video_start;
32662- cbTbl.videoStop = ultracam_video_stop;
32663- cbTbl.processData = ultracam_ProcessIsocData;
32664- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32665- cbTbl.adjustPicture = ultracam_adjust_picture;
32666- cbTbl.getFPS = ultracam_calculate_fps;
32667+ *(void **)&cbTbl.probe = ultracam_probe;
32668+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32669+ *(void **)&cbTbl.videoStart = ultracam_video_start;
32670+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
32671+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
32672+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32673+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
32674+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
32675 return usbvideo_register(
32676 &cams,
32677 MAX_CAMERAS,
32678diff -urNp linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c
32679--- linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
32680+++ linux-2.6.32.45/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
32681@@ -697,15 +697,15 @@ int usbvideo_register(
32682 __func__, cams, base_size, num_cams);
32683
32684 /* Copy callbacks, apply defaults for those that are not set */
32685- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
32686+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
32687 if (cams->cb.getFrame == NULL)
32688- cams->cb.getFrame = usbvideo_GetFrame;
32689+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
32690 if (cams->cb.disconnect == NULL)
32691- cams->cb.disconnect = usbvideo_Disconnect;
32692+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
32693 if (cams->cb.startDataPump == NULL)
32694- cams->cb.startDataPump = usbvideo_StartDataPump;
32695+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
32696 if (cams->cb.stopDataPump == NULL)
32697- cams->cb.stopDataPump = usbvideo_StopDataPump;
32698+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
32699
32700 cams->num_cameras = num_cams;
32701 cams->cam = (struct uvd *) &cams[1];
32702diff -urNp linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c
32703--- linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
32704+++ linux-2.6.32.45/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
32705@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
32706 unsigned char rv, gv, bv;
32707 static unsigned char *Y, *U, *V;
32708
32709+ pax_track_stack();
32710+
32711 frame = usbvision->curFrame;
32712 imageSize = frame->frmwidth * frame->frmheight;
32713 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32714diff -urNp linux-2.6.32.45/drivers/media/video/v4l2-device.c linux-2.6.32.45/drivers/media/video/v4l2-device.c
32715--- linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
32716+++ linux-2.6.32.45/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
32717@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
32718 EXPORT_SYMBOL_GPL(v4l2_device_register);
32719
32720 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
32721- atomic_t *instance)
32722+ atomic_unchecked_t *instance)
32723 {
32724- int num = atomic_inc_return(instance) - 1;
32725+ int num = atomic_inc_return_unchecked(instance) - 1;
32726 int len = strlen(basename);
32727
32728 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
32729diff -urNp linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c
32730--- linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
32731+++ linux-2.6.32.45/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
32732@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
32733 {
32734 struct videobuf_queue q;
32735
32736+ pax_track_stack();
32737+
32738 /* Required to make generic handler to call __videobuf_alloc */
32739 q.int_ops = &sg_ops;
32740
32741diff -urNp linux-2.6.32.45/drivers/message/fusion/mptbase.c linux-2.6.32.45/drivers/message/fusion/mptbase.c
32742--- linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
32743+++ linux-2.6.32.45/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
32744@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
32745 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32746 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32747
32748+#ifdef CONFIG_GRKERNSEC_HIDESYM
32749+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32750+ NULL, NULL);
32751+#else
32752 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32753 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32754+#endif
32755+
32756 /*
32757 * Rounding UP to nearest 4-kB boundary here...
32758 */
32759diff -urNp linux-2.6.32.45/drivers/message/fusion/mptsas.c linux-2.6.32.45/drivers/message/fusion/mptsas.c
32760--- linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
32761+++ linux-2.6.32.45/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
32762@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
32763 return 0;
32764 }
32765
32766+static inline void
32767+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32768+{
32769+ if (phy_info->port_details) {
32770+ phy_info->port_details->rphy = rphy;
32771+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32772+ ioc->name, rphy));
32773+ }
32774+
32775+ if (rphy) {
32776+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32777+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32778+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32779+ ioc->name, rphy, rphy->dev.release));
32780+ }
32781+}
32782+
32783 /* no mutex */
32784 static void
32785 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32786@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
32787 return NULL;
32788 }
32789
32790-static inline void
32791-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32792-{
32793- if (phy_info->port_details) {
32794- phy_info->port_details->rphy = rphy;
32795- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32796- ioc->name, rphy));
32797- }
32798-
32799- if (rphy) {
32800- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32801- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32802- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32803- ioc->name, rphy, rphy->dev.release));
32804- }
32805-}
32806-
32807 static inline struct sas_port *
32808 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32809 {
32810diff -urNp linux-2.6.32.45/drivers/message/fusion/mptscsih.c linux-2.6.32.45/drivers/message/fusion/mptscsih.c
32811--- linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
32812+++ linux-2.6.32.45/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
32813@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32814
32815 h = shost_priv(SChost);
32816
32817- if (h) {
32818- if (h->info_kbuf == NULL)
32819- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32820- return h->info_kbuf;
32821- h->info_kbuf[0] = '\0';
32822+ if (!h)
32823+ return NULL;
32824
32825- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32826- h->info_kbuf[size-1] = '\0';
32827- }
32828+ if (h->info_kbuf == NULL)
32829+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32830+ return h->info_kbuf;
32831+ h->info_kbuf[0] = '\0';
32832+
32833+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32834+ h->info_kbuf[size-1] = '\0';
32835
32836 return h->info_kbuf;
32837 }
32838diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_config.c linux-2.6.32.45/drivers/message/i2o/i2o_config.c
32839--- linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
32840+++ linux-2.6.32.45/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
32841@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
32842 struct i2o_message *msg;
32843 unsigned int iop;
32844
32845+ pax_track_stack();
32846+
32847 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32848 return -EFAULT;
32849
32850diff -urNp linux-2.6.32.45/drivers/message/i2o/i2o_proc.c linux-2.6.32.45/drivers/message/i2o/i2o_proc.c
32851--- linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
32852+++ linux-2.6.32.45/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
32853@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
32854 "Array Controller Device"
32855 };
32856
32857-static char *chtostr(u8 * chars, int n)
32858-{
32859- char tmp[256];
32860- tmp[0] = 0;
32861- return strncat(tmp, (char *)chars, n);
32862-}
32863-
32864 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32865 char *group)
32866 {
32867@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
32868
32869 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32870 seq_printf(seq, "%-#8x", ddm_table.module_id);
32871- seq_printf(seq, "%-29s",
32872- chtostr(ddm_table.module_name_version, 28));
32873+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32874 seq_printf(seq, "%9d ", ddm_table.data_size);
32875 seq_printf(seq, "%8d", ddm_table.code_size);
32876
32877@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
32878
32879 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32880 seq_printf(seq, "%-#8x", dst->module_id);
32881- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32882- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32883+ seq_printf(seq, "%-.28s", dst->module_name_version);
32884+ seq_printf(seq, "%-.8s", dst->date);
32885 seq_printf(seq, "%8d ", dst->module_size);
32886 seq_printf(seq, "%8d ", dst->mpb_size);
32887 seq_printf(seq, "0x%04x", dst->module_flags);
32888@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
32889 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32890 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32891 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32892- seq_printf(seq, "Vendor info : %s\n",
32893- chtostr((u8 *) (work32 + 2), 16));
32894- seq_printf(seq, "Product info : %s\n",
32895- chtostr((u8 *) (work32 + 6), 16));
32896- seq_printf(seq, "Description : %s\n",
32897- chtostr((u8 *) (work32 + 10), 16));
32898- seq_printf(seq, "Product rev. : %s\n",
32899- chtostr((u8 *) (work32 + 14), 8));
32900+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32901+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32902+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32903+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32904
32905 seq_printf(seq, "Serial number : ");
32906 print_serial_number(seq, (u8 *) (work32 + 16),
32907@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
32908 }
32909
32910 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32911- seq_printf(seq, "Module name : %s\n",
32912- chtostr(result.module_name, 24));
32913- seq_printf(seq, "Module revision : %s\n",
32914- chtostr(result.module_rev, 8));
32915+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32916+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32917
32918 seq_printf(seq, "Serial number : ");
32919 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32920@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
32921 return 0;
32922 }
32923
32924- seq_printf(seq, "Device name : %s\n",
32925- chtostr(result.device_name, 64));
32926- seq_printf(seq, "Service name : %s\n",
32927- chtostr(result.service_name, 64));
32928- seq_printf(seq, "Physical name : %s\n",
32929- chtostr(result.physical_location, 64));
32930- seq_printf(seq, "Instance number : %s\n",
32931- chtostr(result.instance_number, 4));
32932+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32933+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32934+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32935+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32936
32937 return 0;
32938 }
32939diff -urNp linux-2.6.32.45/drivers/message/i2o/iop.c linux-2.6.32.45/drivers/message/i2o/iop.c
32940--- linux-2.6.32.45/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
32941+++ linux-2.6.32.45/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
32942@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
32943
32944 spin_lock_irqsave(&c->context_list_lock, flags);
32945
32946- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32947- atomic_inc(&c->context_list_counter);
32948+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32949+ atomic_inc_unchecked(&c->context_list_counter);
32950
32951- entry->context = atomic_read(&c->context_list_counter);
32952+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32953
32954 list_add(&entry->list, &c->context_list);
32955
32956@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32957
32958 #if BITS_PER_LONG == 64
32959 spin_lock_init(&c->context_list_lock);
32960- atomic_set(&c->context_list_counter, 0);
32961+ atomic_set_unchecked(&c->context_list_counter, 0);
32962 INIT_LIST_HEAD(&c->context_list);
32963 #endif
32964
32965diff -urNp linux-2.6.32.45/drivers/mfd/wm8350-i2c.c linux-2.6.32.45/drivers/mfd/wm8350-i2c.c
32966--- linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32967+++ linux-2.6.32.45/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32968@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32969 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32970 int ret;
32971
32972+ pax_track_stack();
32973+
32974 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32975 return -EINVAL;
32976
32977diff -urNp linux-2.6.32.45/drivers/misc/kgdbts.c linux-2.6.32.45/drivers/misc/kgdbts.c
32978--- linux-2.6.32.45/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32979+++ linux-2.6.32.45/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32980@@ -118,7 +118,7 @@
32981 } while (0)
32982 #define MAX_CONFIG_LEN 40
32983
32984-static struct kgdb_io kgdbts_io_ops;
32985+static const struct kgdb_io kgdbts_io_ops;
32986 static char get_buf[BUFMAX];
32987 static int get_buf_cnt;
32988 static char put_buf[BUFMAX];
32989@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32990 module_put(THIS_MODULE);
32991 }
32992
32993-static struct kgdb_io kgdbts_io_ops = {
32994+static const struct kgdb_io kgdbts_io_ops = {
32995 .name = "kgdbts",
32996 .read_char = kgdbts_get_char,
32997 .write_char = kgdbts_put_char,
32998diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c
32999--- linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
33000+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
33001@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
33002
33003 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33004 {
33005- atomic_long_inc(&mcs_op_statistics[op].count);
33006- atomic_long_add(clks, &mcs_op_statistics[op].total);
33007+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33008+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
33009 if (mcs_op_statistics[op].max < clks)
33010 mcs_op_statistics[op].max = clks;
33011 }
33012diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c
33013--- linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
33014+++ linux-2.6.32.45/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
33015@@ -32,9 +32,9 @@
33016
33017 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33018
33019-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33020+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33021 {
33022- unsigned long val = atomic_long_read(v);
33023+ unsigned long val = atomic_long_read_unchecked(v);
33024
33025 if (val)
33026 seq_printf(s, "%16lu %s\n", val, id);
33027@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
33028 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
33029
33030 for (op = 0; op < mcsop_last; op++) {
33031- count = atomic_long_read(&mcs_op_statistics[op].count);
33032- total = atomic_long_read(&mcs_op_statistics[op].total);
33033+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33034+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33035 max = mcs_op_statistics[op].max;
33036 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33037 count ? total / count : 0, max);
33038diff -urNp linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h
33039--- linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
33040+++ linux-2.6.32.45/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
33041@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
33042 * GRU statistics.
33043 */
33044 struct gru_stats_s {
33045- atomic_long_t vdata_alloc;
33046- atomic_long_t vdata_free;
33047- atomic_long_t gts_alloc;
33048- atomic_long_t gts_free;
33049- atomic_long_t vdata_double_alloc;
33050- atomic_long_t gts_double_allocate;
33051- atomic_long_t assign_context;
33052- atomic_long_t assign_context_failed;
33053- atomic_long_t free_context;
33054- atomic_long_t load_user_context;
33055- atomic_long_t load_kernel_context;
33056- atomic_long_t lock_kernel_context;
33057- atomic_long_t unlock_kernel_context;
33058- atomic_long_t steal_user_context;
33059- atomic_long_t steal_kernel_context;
33060- atomic_long_t steal_context_failed;
33061- atomic_long_t nopfn;
33062- atomic_long_t break_cow;
33063- atomic_long_t asid_new;
33064- atomic_long_t asid_next;
33065- atomic_long_t asid_wrap;
33066- atomic_long_t asid_reuse;
33067- atomic_long_t intr;
33068- atomic_long_t intr_mm_lock_failed;
33069- atomic_long_t call_os;
33070- atomic_long_t call_os_offnode_reference;
33071- atomic_long_t call_os_check_for_bug;
33072- atomic_long_t call_os_wait_queue;
33073- atomic_long_t user_flush_tlb;
33074- atomic_long_t user_unload_context;
33075- atomic_long_t user_exception;
33076- atomic_long_t set_context_option;
33077- atomic_long_t migrate_check;
33078- atomic_long_t migrated_retarget;
33079- atomic_long_t migrated_unload;
33080- atomic_long_t migrated_unload_delay;
33081- atomic_long_t migrated_nopfn_retarget;
33082- atomic_long_t migrated_nopfn_unload;
33083- atomic_long_t tlb_dropin;
33084- atomic_long_t tlb_dropin_fail_no_asid;
33085- atomic_long_t tlb_dropin_fail_upm;
33086- atomic_long_t tlb_dropin_fail_invalid;
33087- atomic_long_t tlb_dropin_fail_range_active;
33088- atomic_long_t tlb_dropin_fail_idle;
33089- atomic_long_t tlb_dropin_fail_fmm;
33090- atomic_long_t tlb_dropin_fail_no_exception;
33091- atomic_long_t tlb_dropin_fail_no_exception_war;
33092- atomic_long_t tfh_stale_on_fault;
33093- atomic_long_t mmu_invalidate_range;
33094- atomic_long_t mmu_invalidate_page;
33095- atomic_long_t mmu_clear_flush_young;
33096- atomic_long_t flush_tlb;
33097- atomic_long_t flush_tlb_gru;
33098- atomic_long_t flush_tlb_gru_tgh;
33099- atomic_long_t flush_tlb_gru_zero_asid;
33100-
33101- atomic_long_t copy_gpa;
33102-
33103- atomic_long_t mesq_receive;
33104- atomic_long_t mesq_receive_none;
33105- atomic_long_t mesq_send;
33106- atomic_long_t mesq_send_failed;
33107- atomic_long_t mesq_noop;
33108- atomic_long_t mesq_send_unexpected_error;
33109- atomic_long_t mesq_send_lb_overflow;
33110- atomic_long_t mesq_send_qlimit_reached;
33111- atomic_long_t mesq_send_amo_nacked;
33112- atomic_long_t mesq_send_put_nacked;
33113- atomic_long_t mesq_qf_not_full;
33114- atomic_long_t mesq_qf_locked;
33115- atomic_long_t mesq_qf_noop_not_full;
33116- atomic_long_t mesq_qf_switch_head_failed;
33117- atomic_long_t mesq_qf_unexpected_error;
33118- atomic_long_t mesq_noop_unexpected_error;
33119- atomic_long_t mesq_noop_lb_overflow;
33120- atomic_long_t mesq_noop_qlimit_reached;
33121- atomic_long_t mesq_noop_amo_nacked;
33122- atomic_long_t mesq_noop_put_nacked;
33123+ atomic_long_unchecked_t vdata_alloc;
33124+ atomic_long_unchecked_t vdata_free;
33125+ atomic_long_unchecked_t gts_alloc;
33126+ atomic_long_unchecked_t gts_free;
33127+ atomic_long_unchecked_t vdata_double_alloc;
33128+ atomic_long_unchecked_t gts_double_allocate;
33129+ atomic_long_unchecked_t assign_context;
33130+ atomic_long_unchecked_t assign_context_failed;
33131+ atomic_long_unchecked_t free_context;
33132+ atomic_long_unchecked_t load_user_context;
33133+ atomic_long_unchecked_t load_kernel_context;
33134+ atomic_long_unchecked_t lock_kernel_context;
33135+ atomic_long_unchecked_t unlock_kernel_context;
33136+ atomic_long_unchecked_t steal_user_context;
33137+ atomic_long_unchecked_t steal_kernel_context;
33138+ atomic_long_unchecked_t steal_context_failed;
33139+ atomic_long_unchecked_t nopfn;
33140+ atomic_long_unchecked_t break_cow;
33141+ atomic_long_unchecked_t asid_new;
33142+ atomic_long_unchecked_t asid_next;
33143+ atomic_long_unchecked_t asid_wrap;
33144+ atomic_long_unchecked_t asid_reuse;
33145+ atomic_long_unchecked_t intr;
33146+ atomic_long_unchecked_t intr_mm_lock_failed;
33147+ atomic_long_unchecked_t call_os;
33148+ atomic_long_unchecked_t call_os_offnode_reference;
33149+ atomic_long_unchecked_t call_os_check_for_bug;
33150+ atomic_long_unchecked_t call_os_wait_queue;
33151+ atomic_long_unchecked_t user_flush_tlb;
33152+ atomic_long_unchecked_t user_unload_context;
33153+ atomic_long_unchecked_t user_exception;
33154+ atomic_long_unchecked_t set_context_option;
33155+ atomic_long_unchecked_t migrate_check;
33156+ atomic_long_unchecked_t migrated_retarget;
33157+ atomic_long_unchecked_t migrated_unload;
33158+ atomic_long_unchecked_t migrated_unload_delay;
33159+ atomic_long_unchecked_t migrated_nopfn_retarget;
33160+ atomic_long_unchecked_t migrated_nopfn_unload;
33161+ atomic_long_unchecked_t tlb_dropin;
33162+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33163+ atomic_long_unchecked_t tlb_dropin_fail_upm;
33164+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
33165+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
33166+ atomic_long_unchecked_t tlb_dropin_fail_idle;
33167+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
33168+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33169+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33170+ atomic_long_unchecked_t tfh_stale_on_fault;
33171+ atomic_long_unchecked_t mmu_invalidate_range;
33172+ atomic_long_unchecked_t mmu_invalidate_page;
33173+ atomic_long_unchecked_t mmu_clear_flush_young;
33174+ atomic_long_unchecked_t flush_tlb;
33175+ atomic_long_unchecked_t flush_tlb_gru;
33176+ atomic_long_unchecked_t flush_tlb_gru_tgh;
33177+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33178+
33179+ atomic_long_unchecked_t copy_gpa;
33180+
33181+ atomic_long_unchecked_t mesq_receive;
33182+ atomic_long_unchecked_t mesq_receive_none;
33183+ atomic_long_unchecked_t mesq_send;
33184+ atomic_long_unchecked_t mesq_send_failed;
33185+ atomic_long_unchecked_t mesq_noop;
33186+ atomic_long_unchecked_t mesq_send_unexpected_error;
33187+ atomic_long_unchecked_t mesq_send_lb_overflow;
33188+ atomic_long_unchecked_t mesq_send_qlimit_reached;
33189+ atomic_long_unchecked_t mesq_send_amo_nacked;
33190+ atomic_long_unchecked_t mesq_send_put_nacked;
33191+ atomic_long_unchecked_t mesq_qf_not_full;
33192+ atomic_long_unchecked_t mesq_qf_locked;
33193+ atomic_long_unchecked_t mesq_qf_noop_not_full;
33194+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33195+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33196+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33197+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33198+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33199+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33200+ atomic_long_unchecked_t mesq_noop_put_nacked;
33201
33202 };
33203
33204@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33205 cchop_deallocate, tghop_invalidate, mcsop_last};
33206
33207 struct mcs_op_statistic {
33208- atomic_long_t count;
33209- atomic_long_t total;
33210+ atomic_long_unchecked_t count;
33211+ atomic_long_unchecked_t total;
33212 unsigned long max;
33213 };
33214
33215@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33216
33217 #define STAT(id) do { \
33218 if (gru_options & OPT_STATS) \
33219- atomic_long_inc(&gru_stats.id); \
33220+ atomic_long_inc_unchecked(&gru_stats.id); \
33221 } while (0)
33222
33223 #ifdef CONFIG_SGI_GRU_DEBUG
33224diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h
33225--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33226+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33227@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33228 /* found in xpc_main.c */
33229 extern struct device *xpc_part;
33230 extern struct device *xpc_chan;
33231-extern struct xpc_arch_operations xpc_arch_ops;
33232+extern const struct xpc_arch_operations xpc_arch_ops;
33233 extern int xpc_disengage_timelimit;
33234 extern int xpc_disengage_timedout;
33235 extern int xpc_activate_IRQ_rcvd;
33236diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c
33237--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33238+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33239@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33240 .notifier_call = xpc_system_die,
33241 };
33242
33243-struct xpc_arch_operations xpc_arch_ops;
33244+const struct xpc_arch_operations xpc_arch_ops;
33245
33246 /*
33247 * Timer function to enforce the timelimit on the partition disengage.
33248diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c
33249--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33250+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33251@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33252 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33253 }
33254
33255-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33256+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33257 .setup_partitions = xpc_setup_partitions_sn2,
33258 .teardown_partitions = xpc_teardown_partitions_sn2,
33259 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33260@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33261 int ret;
33262 size_t buf_size;
33263
33264- xpc_arch_ops = xpc_arch_ops_sn2;
33265+ pax_open_kernel();
33266+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33267+ pax_close_kernel();
33268
33269 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33270 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33271diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c
33272--- linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33273+++ linux-2.6.32.45/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33274@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33275 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33276 }
33277
33278-static struct xpc_arch_operations xpc_arch_ops_uv = {
33279+static const struct xpc_arch_operations xpc_arch_ops_uv = {
33280 .setup_partitions = xpc_setup_partitions_uv,
33281 .teardown_partitions = xpc_teardown_partitions_uv,
33282 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33283@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33284 int
33285 xpc_init_uv(void)
33286 {
33287- xpc_arch_ops = xpc_arch_ops_uv;
33288+ pax_open_kernel();
33289+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33290+ pax_close_kernel();
33291
33292 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33293 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33294diff -urNp linux-2.6.32.45/drivers/misc/sgi-xp/xp.h linux-2.6.32.45/drivers/misc/sgi-xp/xp.h
33295--- linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33296+++ linux-2.6.32.45/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33297@@ -289,7 +289,7 @@ struct xpc_interface {
33298 xpc_notify_func, void *);
33299 void (*received) (short, int, void *);
33300 enum xp_retval (*partid_to_nasids) (short, void *);
33301-};
33302+} __no_const;
33303
33304 extern struct xpc_interface xpc_interface;
33305
33306diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c
33307--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33308+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33309@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33310 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33311 unsigned long timeo = jiffies + HZ;
33312
33313+ pax_track_stack();
33314+
33315 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33316 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33317 goto sleep;
33318@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33319 unsigned long initial_adr;
33320 int initial_len = len;
33321
33322+ pax_track_stack();
33323+
33324 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33325 adr += chip->start;
33326 initial_adr = adr;
33327@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33328 int retries = 3;
33329 int ret;
33330
33331+ pax_track_stack();
33332+
33333 adr += chip->start;
33334
33335 retry:
33336diff -urNp linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c
33337--- linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33338+++ linux-2.6.32.45/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33339@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33340 unsigned long cmd_addr;
33341 struct cfi_private *cfi = map->fldrv_priv;
33342
33343+ pax_track_stack();
33344+
33345 adr += chip->start;
33346
33347 /* Ensure cmd read/writes are aligned. */
33348@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33349 DECLARE_WAITQUEUE(wait, current);
33350 int wbufsize, z;
33351
33352+ pax_track_stack();
33353+
33354 /* M58LW064A requires bus alignment for buffer wriets -- saw */
33355 if (adr & (map_bankwidth(map)-1))
33356 return -EINVAL;
33357@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33358 DECLARE_WAITQUEUE(wait, current);
33359 int ret = 0;
33360
33361+ pax_track_stack();
33362+
33363 adr += chip->start;
33364
33365 /* Let's determine this according to the interleave only once */
33366@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33367 unsigned long timeo = jiffies + HZ;
33368 DECLARE_WAITQUEUE(wait, current);
33369
33370+ pax_track_stack();
33371+
33372 adr += chip->start;
33373
33374 /* Let's determine this according to the interleave only once */
33375@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33376 unsigned long timeo = jiffies + HZ;
33377 DECLARE_WAITQUEUE(wait, current);
33378
33379+ pax_track_stack();
33380+
33381 adr += chip->start;
33382
33383 /* Let's determine this according to the interleave only once */
33384diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2000.c linux-2.6.32.45/drivers/mtd/devices/doc2000.c
33385--- linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33386+++ linux-2.6.32.45/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33387@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33388
33389 /* The ECC will not be calculated correctly if less than 512 is written */
33390 /* DBB-
33391- if (len != 0x200 && eccbuf)
33392+ if (len != 0x200)
33393 printk(KERN_WARNING
33394 "ECC needs a full sector write (adr: %lx size %lx)\n",
33395 (long) to, (long) len);
33396diff -urNp linux-2.6.32.45/drivers/mtd/devices/doc2001.c linux-2.6.32.45/drivers/mtd/devices/doc2001.c
33397--- linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33398+++ linux-2.6.32.45/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33399@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33400 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33401
33402 /* Don't allow read past end of device */
33403- if (from >= this->totlen)
33404+ if (from >= this->totlen || !len)
33405 return -EINVAL;
33406
33407 /* Don't allow a single read to cross a 512-byte block boundary */
33408diff -urNp linux-2.6.32.45/drivers/mtd/ftl.c linux-2.6.32.45/drivers/mtd/ftl.c
33409--- linux-2.6.32.45/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33410+++ linux-2.6.32.45/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33411@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33412 loff_t offset;
33413 uint16_t srcunitswap = cpu_to_le16(srcunit);
33414
33415+ pax_track_stack();
33416+
33417 eun = &part->EUNInfo[srcunit];
33418 xfer = &part->XferInfo[xferunit];
33419 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33420diff -urNp linux-2.6.32.45/drivers/mtd/inftlcore.c linux-2.6.32.45/drivers/mtd/inftlcore.c
33421--- linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33422+++ linux-2.6.32.45/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33423@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33424 struct inftl_oob oob;
33425 size_t retlen;
33426
33427+ pax_track_stack();
33428+
33429 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33430 "pending=%d)\n", inftl, thisVUC, pendingblock);
33431
33432diff -urNp linux-2.6.32.45/drivers/mtd/inftlmount.c linux-2.6.32.45/drivers/mtd/inftlmount.c
33433--- linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33434+++ linux-2.6.32.45/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33435@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33436 struct INFTLPartition *ip;
33437 size_t retlen;
33438
33439+ pax_track_stack();
33440+
33441 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33442
33443 /*
33444diff -urNp linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c
33445--- linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33446+++ linux-2.6.32.45/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33447@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33448 {
33449 map_word pfow_val[4];
33450
33451+ pax_track_stack();
33452+
33453 /* Check identification string */
33454 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33455 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33456diff -urNp linux-2.6.32.45/drivers/mtd/mtdchar.c linux-2.6.32.45/drivers/mtd/mtdchar.c
33457--- linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33458+++ linux-2.6.32.45/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33459@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33460 u_long size;
33461 struct mtd_info_user info;
33462
33463+ pax_track_stack();
33464+
33465 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33466
33467 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33468diff -urNp linux-2.6.32.45/drivers/mtd/nftlcore.c linux-2.6.32.45/drivers/mtd/nftlcore.c
33469--- linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33470+++ linux-2.6.32.45/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33471@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33472 int inplace = 1;
33473 size_t retlen;
33474
33475+ pax_track_stack();
33476+
33477 memset(BlockMap, 0xff, sizeof(BlockMap));
33478 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33479
33480diff -urNp linux-2.6.32.45/drivers/mtd/nftlmount.c linux-2.6.32.45/drivers/mtd/nftlmount.c
33481--- linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33482+++ linux-2.6.32.45/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33483@@ -23,6 +23,7 @@
33484 #include <asm/errno.h>
33485 #include <linux/delay.h>
33486 #include <linux/slab.h>
33487+#include <linux/sched.h>
33488 #include <linux/mtd/mtd.h>
33489 #include <linux/mtd/nand.h>
33490 #include <linux/mtd/nftl.h>
33491@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33492 struct mtd_info *mtd = nftl->mbd.mtd;
33493 unsigned int i;
33494
33495+ pax_track_stack();
33496+
33497 /* Assume logical EraseSize == physical erasesize for starting the scan.
33498 We'll sort it out later if we find a MediaHeader which says otherwise */
33499 /* Actually, we won't. The new DiskOnChip driver has already scanned
33500diff -urNp linux-2.6.32.45/drivers/mtd/ubi/build.c linux-2.6.32.45/drivers/mtd/ubi/build.c
33501--- linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33502+++ linux-2.6.32.45/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33503@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33504 static int __init bytes_str_to_int(const char *str)
33505 {
33506 char *endp;
33507- unsigned long result;
33508+ unsigned long result, scale = 1;
33509
33510 result = simple_strtoul(str, &endp, 0);
33511 if (str == endp || result >= INT_MAX) {
33512@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33513
33514 switch (*endp) {
33515 case 'G':
33516- result *= 1024;
33517+ scale *= 1024;
33518 case 'M':
33519- result *= 1024;
33520+ scale *= 1024;
33521 case 'K':
33522- result *= 1024;
33523+ scale *= 1024;
33524 if (endp[1] == 'i' && endp[2] == 'B')
33525 endp += 2;
33526 case '\0':
33527@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33528 return -EINVAL;
33529 }
33530
33531- return result;
33532+ if ((intoverflow_t)result*scale >= INT_MAX) {
33533+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33534+ str);
33535+ return -EINVAL;
33536+ }
33537+
33538+ return result*scale;
33539 }
33540
33541 /**
33542diff -urNp linux-2.6.32.45/drivers/net/bnx2.c linux-2.6.32.45/drivers/net/bnx2.c
33543--- linux-2.6.32.45/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33544+++ linux-2.6.32.45/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33545@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33546 int rc = 0;
33547 u32 magic, csum;
33548
33549+ pax_track_stack();
33550+
33551 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33552 goto test_nvram_done;
33553
33554diff -urNp linux-2.6.32.45/drivers/net/cxgb3/l2t.h linux-2.6.32.45/drivers/net/cxgb3/l2t.h
33555--- linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33556+++ linux-2.6.32.45/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33557@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33558 */
33559 struct l2t_skb_cb {
33560 arp_failure_handler_func arp_failure_handler;
33561-};
33562+} __no_const;
33563
33564 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33565
33566diff -urNp linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c
33567--- linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33568+++ linux-2.6.32.45/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33569@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33570 int i, addr, ret;
33571 struct t3_vpd vpd;
33572
33573+ pax_track_stack();
33574+
33575 /*
33576 * Card information is normally at VPD_BASE but some early cards had
33577 * it at 0.
33578diff -urNp linux-2.6.32.45/drivers/net/e1000e/82571.c linux-2.6.32.45/drivers/net/e1000e/82571.c
33579--- linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33580+++ linux-2.6.32.45/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33581@@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33582 /* check for link */
33583 switch (hw->phy.media_type) {
33584 case e1000_media_type_copper:
33585- func->setup_physical_interface = e1000_setup_copper_link_82571;
33586- func->check_for_link = e1000e_check_for_copper_link;
33587- func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33588+ *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33589+ *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33590+ *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33591 break;
33592 case e1000_media_type_fiber:
33593- func->setup_physical_interface =
33594+ *(void **)&func->setup_physical_interface =
33595 e1000_setup_fiber_serdes_link_82571;
33596- func->check_for_link = e1000e_check_for_fiber_link;
33597- func->get_link_up_info =
33598+ *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33599+ *(void **)&func->get_link_up_info =
33600 e1000e_get_speed_and_duplex_fiber_serdes;
33601 break;
33602 case e1000_media_type_internal_serdes:
33603- func->setup_physical_interface =
33604+ *(void **)&func->setup_physical_interface =
33605 e1000_setup_fiber_serdes_link_82571;
33606- func->check_for_link = e1000_check_for_serdes_link_82571;
33607- func->get_link_up_info =
33608+ *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33609+ *(void **)&func->get_link_up_info =
33610 e1000e_get_speed_and_duplex_fiber_serdes;
33611 break;
33612 default:
33613@@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33614 switch (hw->mac.type) {
33615 case e1000_82574:
33616 case e1000_82583:
33617- func->check_mng_mode = e1000_check_mng_mode_82574;
33618- func->led_on = e1000_led_on_82574;
33619+ *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33620+ *(void **)&func->led_on = e1000_led_on_82574;
33621 break;
33622 default:
33623- func->check_mng_mode = e1000e_check_mng_mode_generic;
33624- func->led_on = e1000e_led_on_generic;
33625+ *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33626+ *(void **)&func->led_on = e1000e_led_on_generic;
33627 break;
33628 }
33629
33630@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33631 temp = er32(ICRXDMTC);
33632 }
33633
33634-static struct e1000_mac_operations e82571_mac_ops = {
33635+static const struct e1000_mac_operations e82571_mac_ops = {
33636 /* .check_mng_mode: mac type dependent */
33637 /* .check_for_link: media type dependent */
33638 .id_led_init = e1000e_id_led_init,
33639@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33640 .setup_led = e1000e_setup_led_generic,
33641 };
33642
33643-static struct e1000_phy_operations e82_phy_ops_igp = {
33644+static const struct e1000_phy_operations e82_phy_ops_igp = {
33645 .acquire_phy = e1000_get_hw_semaphore_82571,
33646 .check_reset_block = e1000e_check_reset_block_generic,
33647 .commit_phy = NULL,
33648@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33649 .cfg_on_link_up = NULL,
33650 };
33651
33652-static struct e1000_phy_operations e82_phy_ops_m88 = {
33653+static const struct e1000_phy_operations e82_phy_ops_m88 = {
33654 .acquire_phy = e1000_get_hw_semaphore_82571,
33655 .check_reset_block = e1000e_check_reset_block_generic,
33656 .commit_phy = e1000e_phy_sw_reset,
33657@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33658 .cfg_on_link_up = NULL,
33659 };
33660
33661-static struct e1000_phy_operations e82_phy_ops_bm = {
33662+static const struct e1000_phy_operations e82_phy_ops_bm = {
33663 .acquire_phy = e1000_get_hw_semaphore_82571,
33664 .check_reset_block = e1000e_check_reset_block_generic,
33665 .commit_phy = e1000e_phy_sw_reset,
33666@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33667 .cfg_on_link_up = NULL,
33668 };
33669
33670-static struct e1000_nvm_operations e82571_nvm_ops = {
33671+static const struct e1000_nvm_operations e82571_nvm_ops = {
33672 .acquire_nvm = e1000_acquire_nvm_82571,
33673 .read_nvm = e1000e_read_nvm_eerd,
33674 .release_nvm = e1000_release_nvm_82571,
33675diff -urNp linux-2.6.32.45/drivers/net/e1000e/e1000.h linux-2.6.32.45/drivers/net/e1000e/e1000.h
33676--- linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
33677+++ linux-2.6.32.45/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
33678@@ -375,9 +375,9 @@ struct e1000_info {
33679 u32 pba;
33680 u32 max_hw_frame_size;
33681 s32 (*get_variants)(struct e1000_adapter *);
33682- struct e1000_mac_operations *mac_ops;
33683- struct e1000_phy_operations *phy_ops;
33684- struct e1000_nvm_operations *nvm_ops;
33685+ const struct e1000_mac_operations *mac_ops;
33686+ const struct e1000_phy_operations *phy_ops;
33687+ const struct e1000_nvm_operations *nvm_ops;
33688 };
33689
33690 /* hardware capability, feature, and workaround flags */
33691diff -urNp linux-2.6.32.45/drivers/net/e1000e/es2lan.c linux-2.6.32.45/drivers/net/e1000e/es2lan.c
33692--- linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
33693+++ linux-2.6.32.45/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
33694@@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
33695 /* check for link */
33696 switch (hw->phy.media_type) {
33697 case e1000_media_type_copper:
33698- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33699- func->check_for_link = e1000e_check_for_copper_link;
33700+ *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
33701+ *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33702 break;
33703 case e1000_media_type_fiber:
33704- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33705- func->check_for_link = e1000e_check_for_fiber_link;
33706+ *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33707+ *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33708 break;
33709 case e1000_media_type_internal_serdes:
33710- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33711- func->check_for_link = e1000e_check_for_serdes_link;
33712+ *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
33713+ *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
33714 break;
33715 default:
33716 return -E1000_ERR_CONFIG;
33717@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
33718 temp = er32(ICRXDMTC);
33719 }
33720
33721-static struct e1000_mac_operations es2_mac_ops = {
33722+static const struct e1000_mac_operations es2_mac_ops = {
33723 .id_led_init = e1000e_id_led_init,
33724 .check_mng_mode = e1000e_check_mng_mode_generic,
33725 /* check_for_link dependent on media type */
33726@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
33727 .setup_led = e1000e_setup_led_generic,
33728 };
33729
33730-static struct e1000_phy_operations es2_phy_ops = {
33731+static const struct e1000_phy_operations es2_phy_ops = {
33732 .acquire_phy = e1000_acquire_phy_80003es2lan,
33733 .check_reset_block = e1000e_check_reset_block_generic,
33734 .commit_phy = e1000e_phy_sw_reset,
33735@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
33736 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
33737 };
33738
33739-static struct e1000_nvm_operations es2_nvm_ops = {
33740+static const struct e1000_nvm_operations es2_nvm_ops = {
33741 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
33742 .read_nvm = e1000e_read_nvm_eerd,
33743 .release_nvm = e1000_release_nvm_80003es2lan,
33744diff -urNp linux-2.6.32.45/drivers/net/e1000e/hw.h linux-2.6.32.45/drivers/net/e1000e/hw.h
33745--- linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
33746+++ linux-2.6.32.45/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
33747@@ -756,34 +756,34 @@ struct e1000_mac_operations {
33748
33749 /* Function pointers for the PHY. */
33750 struct e1000_phy_operations {
33751- s32 (*acquire_phy)(struct e1000_hw *);
33752- s32 (*check_polarity)(struct e1000_hw *);
33753- s32 (*check_reset_block)(struct e1000_hw *);
33754- s32 (*commit_phy)(struct e1000_hw *);
33755- s32 (*force_speed_duplex)(struct e1000_hw *);
33756- s32 (*get_cfg_done)(struct e1000_hw *hw);
33757- s32 (*get_cable_length)(struct e1000_hw *);
33758- s32 (*get_phy_info)(struct e1000_hw *);
33759- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
33760- s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33761- void (*release_phy)(struct e1000_hw *);
33762- s32 (*reset_phy)(struct e1000_hw *);
33763- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
33764- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33765- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
33766- s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33767- s32 (*cfg_on_link_up)(struct e1000_hw *);
33768+ s32 (* acquire_phy)(struct e1000_hw *);
33769+ s32 (* check_polarity)(struct e1000_hw *);
33770+ s32 (* check_reset_block)(struct e1000_hw *);
33771+ s32 (* commit_phy)(struct e1000_hw *);
33772+ s32 (* force_speed_duplex)(struct e1000_hw *);
33773+ s32 (* get_cfg_done)(struct e1000_hw *hw);
33774+ s32 (* get_cable_length)(struct e1000_hw *);
33775+ s32 (* get_phy_info)(struct e1000_hw *);
33776+ s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
33777+ s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
33778+ void (* release_phy)(struct e1000_hw *);
33779+ s32 (* reset_phy)(struct e1000_hw *);
33780+ s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
33781+ s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
33782+ s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
33783+ s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
33784+ s32 (* cfg_on_link_up)(struct e1000_hw *);
33785 };
33786
33787 /* Function pointers for the NVM. */
33788 struct e1000_nvm_operations {
33789- s32 (*acquire_nvm)(struct e1000_hw *);
33790- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33791- void (*release_nvm)(struct e1000_hw *);
33792- s32 (*update_nvm)(struct e1000_hw *);
33793- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
33794- s32 (*validate_nvm)(struct e1000_hw *);
33795- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33796+ s32 (* const acquire_nvm)(struct e1000_hw *);
33797+ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
33798+ void (* const release_nvm)(struct e1000_hw *);
33799+ s32 (* const update_nvm)(struct e1000_hw *);
33800+ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
33801+ s32 (* const validate_nvm)(struct e1000_hw *);
33802+ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
33803 };
33804
33805 struct e1000_mac_info {
33806diff -urNp linux-2.6.32.45/drivers/net/e1000e/ich8lan.c linux-2.6.32.45/drivers/net/e1000e/ich8lan.c
33807--- linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
33808+++ linux-2.6.32.45/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
33809@@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
33810 phy->addr = 1;
33811 phy->reset_delay_us = 100;
33812
33813- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33814- phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33815- phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33816- phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33817- phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33818- phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33819- phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33820+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33821+ *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
33822+ *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
33823+ *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
33824+ *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
33825+ *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
33826+ *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
33827 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33828
33829 /*
33830@@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
33831 phy->type = e1000e_get_phy_type_from_id(phy->id);
33832
33833 if (phy->type == e1000_phy_82577) {
33834- phy->ops.check_polarity = e1000_check_polarity_82577;
33835- phy->ops.force_speed_duplex =
33836+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
33837+ *(void **)&phy->ops.force_speed_duplex =
33838 e1000_phy_force_speed_duplex_82577;
33839- phy->ops.get_cable_length = e1000_get_cable_length_82577;
33840- phy->ops.get_phy_info = e1000_get_phy_info_82577;
33841- phy->ops.commit_phy = e1000e_phy_sw_reset;
33842+ *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
33843+ *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
33844+ *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
33845 }
33846
33847 out:
33848@@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
33849 */
33850 ret_val = e1000e_determine_phy_address(hw);
33851 if (ret_val) {
33852- hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33853- hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33854+ *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33855+ *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33856 ret_val = e1000e_determine_phy_address(hw);
33857 if (ret_val)
33858 return ret_val;
33859@@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
33860 case IGP03E1000_E_PHY_ID:
33861 phy->type = e1000_phy_igp_3;
33862 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33863- phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33864- phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33865+ *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
33866+ *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
33867 break;
33868 case IFE_E_PHY_ID:
33869 case IFE_PLUS_E_PHY_ID:
33870@@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
33871 case BME1000_E_PHY_ID:
33872 phy->type = e1000_phy_bm;
33873 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
33874- hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33875- hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33876- hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33877+ *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
33878+ *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
33879+ *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
33880 break;
33881 default:
33882 return -E1000_ERR_PHY;
33883 break;
33884 }
33885
33886- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33887+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
33888
33889 return 0;
33890 }
33891@@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
33892 case e1000_ich9lan:
33893 case e1000_ich10lan:
33894 /* ID LED init */
33895- mac->ops.id_led_init = e1000e_id_led_init;
33896+ *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
33897 /* setup LED */
33898- mac->ops.setup_led = e1000e_setup_led_generic;
33899+ *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
33900 /* cleanup LED */
33901- mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33902+ *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
33903 /* turn on/off LED */
33904- mac->ops.led_on = e1000_led_on_ich8lan;
33905- mac->ops.led_off = e1000_led_off_ich8lan;
33906+ *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
33907+ *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
33908 break;
33909 case e1000_pchlan:
33910 /* ID LED init */
33911- mac->ops.id_led_init = e1000_id_led_init_pchlan;
33912+ *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
33913 /* setup LED */
33914- mac->ops.setup_led = e1000_setup_led_pchlan;
33915+ *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
33916 /* cleanup LED */
33917- mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33918+ *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
33919 /* turn on/off LED */
33920- mac->ops.led_on = e1000_led_on_pchlan;
33921- mac->ops.led_off = e1000_led_off_pchlan;
33922+ *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
33923+ *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
33924 break;
33925 default:
33926 break;
33927@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
33928 }
33929 }
33930
33931-static struct e1000_mac_operations ich8_mac_ops = {
33932+static const struct e1000_mac_operations ich8_mac_ops = {
33933 .id_led_init = e1000e_id_led_init,
33934 .check_mng_mode = e1000_check_mng_mode_ich8lan,
33935 .check_for_link = e1000_check_for_copper_link_ich8lan,
33936@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
33937 /* id_led_init dependent on mac type */
33938 };
33939
33940-static struct e1000_phy_operations ich8_phy_ops = {
33941+static const struct e1000_phy_operations ich8_phy_ops = {
33942 .acquire_phy = e1000_acquire_swflag_ich8lan,
33943 .check_reset_block = e1000_check_reset_block_ich8lan,
33944 .commit_phy = NULL,
33945@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
33946 .write_phy_reg = e1000e_write_phy_reg_igp,
33947 };
33948
33949-static struct e1000_nvm_operations ich8_nvm_ops = {
33950+static const struct e1000_nvm_operations ich8_nvm_ops = {
33951 .acquire_nvm = e1000_acquire_nvm_ich8lan,
33952 .read_nvm = e1000_read_nvm_ich8lan,
33953 .release_nvm = e1000_release_nvm_ich8lan,
33954diff -urNp linux-2.6.32.45/drivers/net/e1000e/netdev.c linux-2.6.32.45/drivers/net/e1000e/netdev.c
33955--- linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
33956+++ linux-2.6.32.45/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
33957@@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
33958
33959 err = -EIO;
33960
33961- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33962- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33963- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33964+ memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
33965+ memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
33966+ memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
33967
33968 err = ei->get_variants(adapter);
33969 if (err)
33970diff -urNp linux-2.6.32.45/drivers/net/hamradio/6pack.c linux-2.6.32.45/drivers/net/hamradio/6pack.c
33971--- linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
33972+++ linux-2.6.32.45/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
33973@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
33974 unsigned char buf[512];
33975 int count1;
33976
33977+ pax_track_stack();
33978+
33979 if (!count)
33980 return;
33981
33982diff -urNp linux-2.6.32.45/drivers/net/ibmveth.c linux-2.6.32.45/drivers/net/ibmveth.c
33983--- linux-2.6.32.45/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
33984+++ linux-2.6.32.45/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
33985@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
33986 NULL,
33987 };
33988
33989-static struct sysfs_ops veth_pool_ops = {
33990+static const struct sysfs_ops veth_pool_ops = {
33991 .show = veth_pool_show,
33992 .store = veth_pool_store,
33993 };
33994diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_82575.c linux-2.6.32.45/drivers/net/igb/e1000_82575.c
33995--- linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
33996+++ linux-2.6.32.45/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
33997@@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
33998 ? true : false;
33999
34000 /* physical interface link setup */
34001- mac->ops.setup_physical_interface =
34002+ *(void **)&mac->ops.setup_physical_interface =
34003 (hw->phy.media_type == e1000_media_type_copper)
34004 ? igb_setup_copper_link_82575
34005 : igb_setup_serdes_link_82575;
34006@@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
34007
34008 /* PHY function pointers */
34009 if (igb_sgmii_active_82575(hw)) {
34010- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
34011- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
34012- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
34013+ *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
34014+ *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
34015+ *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
34016 } else {
34017- phy->ops.reset = igb_phy_hw_reset;
34018- phy->ops.read_reg = igb_read_phy_reg_igp;
34019- phy->ops.write_reg = igb_write_phy_reg_igp;
34020+ *(void **)&phy->ops.reset = igb_phy_hw_reset;
34021+ *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
34022+ *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
34023 }
34024
34025 /* set lan id */
34026@@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
34027 switch (phy->id) {
34028 case M88E1111_I_PHY_ID:
34029 phy->type = e1000_phy_m88;
34030- phy->ops.get_phy_info = igb_get_phy_info_m88;
34031- phy->ops.get_cable_length = igb_get_cable_length_m88;
34032- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
34033+ *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
34034+ *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
34035+ *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
34036 break;
34037 case IGP03E1000_E_PHY_ID:
34038 phy->type = e1000_phy_igp_3;
34039- phy->ops.get_phy_info = igb_get_phy_info_igp;
34040- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
34041- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
34042- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
34043- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
34044+ *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
34045+ *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
34046+ *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
34047+ *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
34048+ *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
34049 break;
34050 default:
34051 return -E1000_ERR_PHY;
34052@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
34053 wr32(E1000_VT_CTL, vt_ctl);
34054 }
34055
34056-static struct e1000_mac_operations e1000_mac_ops_82575 = {
34057+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
34058 .reset_hw = igb_reset_hw_82575,
34059 .init_hw = igb_init_hw_82575,
34060 .check_for_link = igb_check_for_link_82575,
34061@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
34062 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
34063 };
34064
34065-static struct e1000_phy_operations e1000_phy_ops_82575 = {
34066+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
34067 .acquire = igb_acquire_phy_82575,
34068 .get_cfg_done = igb_get_cfg_done_82575,
34069 .release = igb_release_phy_82575,
34070 };
34071
34072-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34073+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34074 .acquire = igb_acquire_nvm_82575,
34075 .read = igb_read_nvm_eerd,
34076 .release = igb_release_nvm_82575,
34077diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_hw.h linux-2.6.32.45/drivers/net/igb/e1000_hw.h
34078--- linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
34079+++ linux-2.6.32.45/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
34080@@ -305,17 +305,17 @@ struct e1000_phy_operations {
34081 };
34082
34083 struct e1000_nvm_operations {
34084- s32 (*acquire)(struct e1000_hw *);
34085- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
34086- void (*release)(struct e1000_hw *);
34087- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34088+ s32 (* const acquire)(struct e1000_hw *);
34089+ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
34090+ void (* const release)(struct e1000_hw *);
34091+ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
34092 };
34093
34094 struct e1000_info {
34095 s32 (*get_invariants)(struct e1000_hw *);
34096- struct e1000_mac_operations *mac_ops;
34097- struct e1000_phy_operations *phy_ops;
34098- struct e1000_nvm_operations *nvm_ops;
34099+ const struct e1000_mac_operations *mac_ops;
34100+ const struct e1000_phy_operations *phy_ops;
34101+ const struct e1000_nvm_operations *nvm_ops;
34102 };
34103
34104 extern const struct e1000_info e1000_82575_info;
34105diff -urNp linux-2.6.32.45/drivers/net/igb/e1000_mbx.c linux-2.6.32.45/drivers/net/igb/e1000_mbx.c
34106--- linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
34107+++ linux-2.6.32.45/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
34108@@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
34109
34110 mbx->size = E1000_VFMAILBOX_SIZE;
34111
34112- mbx->ops.read = igb_read_mbx_pf;
34113- mbx->ops.write = igb_write_mbx_pf;
34114- mbx->ops.read_posted = igb_read_posted_mbx;
34115- mbx->ops.write_posted = igb_write_posted_mbx;
34116- mbx->ops.check_for_msg = igb_check_for_msg_pf;
34117- mbx->ops.check_for_ack = igb_check_for_ack_pf;
34118- mbx->ops.check_for_rst = igb_check_for_rst_pf;
34119+ *(void **)&mbx->ops.read = igb_read_mbx_pf;
34120+ *(void **)&mbx->ops.write = igb_write_mbx_pf;
34121+ *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
34122+ *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
34123+ *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
34124+ *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
34125+ *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
34126
34127 mbx->stats.msgs_tx = 0;
34128 mbx->stats.msgs_rx = 0;
34129diff -urNp linux-2.6.32.45/drivers/net/igb/igb_main.c linux-2.6.32.45/drivers/net/igb/igb_main.c
34130--- linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
34131+++ linux-2.6.32.45/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
34132@@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
34133 /* setup the private structure */
34134 hw->back = adapter;
34135 /* Copy the default MAC, PHY and NVM function pointers */
34136- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34137- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34138- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34139+ memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34140+ memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34141+ memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34142 /* Initialize skew-specific constants */
34143 err = ei->get_invariants(hw);
34144 if (err)
34145diff -urNp linux-2.6.32.45/drivers/net/igbvf/mbx.c linux-2.6.32.45/drivers/net/igbvf/mbx.c
34146--- linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
34147+++ linux-2.6.32.45/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
34148@@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
34149
34150 mbx->size = E1000_VFMAILBOX_SIZE;
34151
34152- mbx->ops.read = e1000_read_mbx_vf;
34153- mbx->ops.write = e1000_write_mbx_vf;
34154- mbx->ops.read_posted = e1000_read_posted_mbx;
34155- mbx->ops.write_posted = e1000_write_posted_mbx;
34156- mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34157- mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34158- mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34159+ *(void **)&mbx->ops.read = e1000_read_mbx_vf;
34160+ *(void **)&mbx->ops.write = e1000_write_mbx_vf;
34161+ *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
34162+ *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
34163+ *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34164+ *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34165+ *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34166
34167 mbx->stats.msgs_tx = 0;
34168 mbx->stats.msgs_rx = 0;
34169diff -urNp linux-2.6.32.45/drivers/net/igbvf/vf.c linux-2.6.32.45/drivers/net/igbvf/vf.c
34170--- linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
34171+++ linux-2.6.32.45/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
34172@@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
34173
34174 /* Function pointers */
34175 /* reset */
34176- mac->ops.reset_hw = e1000_reset_hw_vf;
34177+ *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
34178 /* hw initialization */
34179- mac->ops.init_hw = e1000_init_hw_vf;
34180+ *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
34181 /* check for link */
34182- mac->ops.check_for_link = e1000_check_for_link_vf;
34183+ *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
34184 /* link info */
34185- mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34186+ *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34187 /* multicast address update */
34188- mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34189+ *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34190 /* set mac address */
34191- mac->ops.rar_set = e1000_rar_set_vf;
34192+ *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
34193 /* read mac address */
34194- mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34195+ *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34196 /* set vlan filter table array */
34197- mac->ops.set_vfta = e1000_set_vfta_vf;
34198+ *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34199
34200 return E1000_SUCCESS;
34201 }
34202@@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34203 **/
34204 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34205 {
34206- hw->mac.ops.init_params = e1000_init_mac_params_vf;
34207- hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34208+ *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34209+ *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34210 }
34211
34212 /**
34213diff -urNp linux-2.6.32.45/drivers/net/iseries_veth.c linux-2.6.32.45/drivers/net/iseries_veth.c
34214--- linux-2.6.32.45/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34215+++ linux-2.6.32.45/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34216@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34217 NULL
34218 };
34219
34220-static struct sysfs_ops veth_cnx_sysfs_ops = {
34221+static const struct sysfs_ops veth_cnx_sysfs_ops = {
34222 .show = veth_cnx_attribute_show
34223 };
34224
34225@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34226 NULL
34227 };
34228
34229-static struct sysfs_ops veth_port_sysfs_ops = {
34230+static const struct sysfs_ops veth_port_sysfs_ops = {
34231 .show = veth_port_attribute_show
34232 };
34233
34234diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c
34235--- linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34236+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34237@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34238 u32 rctl;
34239 int i;
34240
34241+ pax_track_stack();
34242+
34243 /* Check for Promiscuous and All Multicast modes */
34244
34245 rctl = IXGB_READ_REG(hw, RCTL);
34246diff -urNp linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c
34247--- linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34248+++ linux-2.6.32.45/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34249@@ -260,6 +260,9 @@ void __devinit
34250 ixgb_check_options(struct ixgb_adapter *adapter)
34251 {
34252 int bd = adapter->bd_number;
34253+
34254+ pax_track_stack();
34255+
34256 if (bd >= IXGB_MAX_NIC) {
34257 printk(KERN_NOTICE
34258 "Warning: no configuration for board #%i\n", bd);
34259diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c
34260--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34261+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34262@@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34263
34264 /* Overwrite the link function pointers if copper PHY */
34265 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34266- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34267- mac->ops.get_link_capabilities =
34268+ *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34269+ *(void **)&mac->ops.get_link_capabilities =
34270 &ixgbe_get_copper_link_capabilities_82598;
34271 }
34272
34273 switch (hw->phy.type) {
34274 case ixgbe_phy_tn:
34275- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34276- phy->ops.get_firmware_version =
34277+ *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34278+ *(void **)&phy->ops.get_firmware_version =
34279 &ixgbe_get_phy_firmware_version_tnx;
34280 break;
34281 case ixgbe_phy_nl:
34282- phy->ops.reset = &ixgbe_reset_phy_nl;
34283+ *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34284
34285 /* Call SFP+ identify routine to get the SFP+ module type */
34286 ret_val = phy->ops.identify_sfp(hw);
34287diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c
34288--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34289+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34290@@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34291 struct ixgbe_mac_info *mac = &hw->mac;
34292 if (hw->phy.multispeed_fiber) {
34293 /* Set up dual speed SFP+ support */
34294- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34295+ *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34296 } else {
34297- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34298+ *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34299 }
34300 }
34301
34302@@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34303 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34304 ixgbe_init_mac_link_ops_82599(hw);
34305
34306- hw->phy.ops.reset = NULL;
34307+ *(void **)&hw->phy.ops.reset = NULL;
34308
34309 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34310 &data_offset);
34311@@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34312
34313 /* If copper media, overwrite with copper function pointers */
34314 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34315- mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34316- mac->ops.get_link_capabilities =
34317+ *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34318+ *(void **)&mac->ops.get_link_capabilities =
34319 &ixgbe_get_copper_link_capabilities_82599;
34320 }
34321
34322 /* Set necessary function pointers based on phy type */
34323 switch (hw->phy.type) {
34324 case ixgbe_phy_tn:
34325- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34326- phy->ops.get_firmware_version =
34327+ *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34328+ *(void **)&phy->ops.get_firmware_version =
34329 &ixgbe_get_phy_firmware_version_tnx;
34330 break;
34331 default:
34332diff -urNp linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c
34333--- linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34334+++ linux-2.6.32.45/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34335@@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34336 adapter->bd_number = cards_found;
34337
34338 /* Setup hw api */
34339- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34340+ memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34341 hw->mac.type = ii->mac;
34342
34343 /* EEPROM */
34344- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34345+ memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34346 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34347 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34348 if (!(eec & (1 << 8)))
34349- hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34350+ *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34351
34352 /* PHY */
34353- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34354+ memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34355 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34356 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34357 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
34358diff -urNp linux-2.6.32.45/drivers/net/mlx4/main.c linux-2.6.32.45/drivers/net/mlx4/main.c
34359--- linux-2.6.32.45/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34360+++ linux-2.6.32.45/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34361@@ -38,6 +38,7 @@
34362 #include <linux/errno.h>
34363 #include <linux/pci.h>
34364 #include <linux/dma-mapping.h>
34365+#include <linux/sched.h>
34366
34367 #include <linux/mlx4/device.h>
34368 #include <linux/mlx4/doorbell.h>
34369@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34370 u64 icm_size;
34371 int err;
34372
34373+ pax_track_stack();
34374+
34375 err = mlx4_QUERY_FW(dev);
34376 if (err) {
34377 if (err == -EACCES)
34378diff -urNp linux-2.6.32.45/drivers/net/niu.c linux-2.6.32.45/drivers/net/niu.c
34379--- linux-2.6.32.45/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34380+++ linux-2.6.32.45/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34381@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34382 int i, num_irqs, err;
34383 u8 first_ldg;
34384
34385+ pax_track_stack();
34386+
34387 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34388 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34389 ldg_num_map[i] = first_ldg + i;
34390diff -urNp linux-2.6.32.45/drivers/net/pcnet32.c linux-2.6.32.45/drivers/net/pcnet32.c
34391--- linux-2.6.32.45/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34392+++ linux-2.6.32.45/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34393@@ -79,7 +79,7 @@ static int cards_found;
34394 /*
34395 * VLB I/O addresses
34396 */
34397-static unsigned int pcnet32_portlist[] __initdata =
34398+static unsigned int pcnet32_portlist[] __devinitdata =
34399 { 0x300, 0x320, 0x340, 0x360, 0 };
34400
34401 static int pcnet32_debug = 0;
34402@@ -267,7 +267,7 @@ struct pcnet32_private {
34403 struct sk_buff **rx_skbuff;
34404 dma_addr_t *tx_dma_addr;
34405 dma_addr_t *rx_dma_addr;
34406- struct pcnet32_access a;
34407+ struct pcnet32_access *a;
34408 spinlock_t lock; /* Guard lock */
34409 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34410 unsigned int rx_ring_size; /* current rx ring size */
34411@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34412 u16 val;
34413
34414 netif_wake_queue(dev);
34415- val = lp->a.read_csr(ioaddr, CSR3);
34416+ val = lp->a->read_csr(ioaddr, CSR3);
34417 val &= 0x00ff;
34418- lp->a.write_csr(ioaddr, CSR3, val);
34419+ lp->a->write_csr(ioaddr, CSR3, val);
34420 napi_enable(&lp->napi);
34421 }
34422
34423@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34424 r = mii_link_ok(&lp->mii_if);
34425 } else if (lp->chip_version >= PCNET32_79C970A) {
34426 ulong ioaddr = dev->base_addr; /* card base I/O address */
34427- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34428+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34429 } else { /* can not detect link on really old chips */
34430 r = 1;
34431 }
34432@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34433 pcnet32_netif_stop(dev);
34434
34435 spin_lock_irqsave(&lp->lock, flags);
34436- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34437+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34438
34439 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34440
34441@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34442 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34443 {
34444 struct pcnet32_private *lp = netdev_priv(dev);
34445- struct pcnet32_access *a = &lp->a; /* access to registers */
34446+ struct pcnet32_access *a = lp->a; /* access to registers */
34447 ulong ioaddr = dev->base_addr; /* card base I/O address */
34448 struct sk_buff *skb; /* sk buff */
34449 int x, i; /* counters */
34450@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34451 pcnet32_netif_stop(dev);
34452
34453 spin_lock_irqsave(&lp->lock, flags);
34454- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34455+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34456
34457 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34458
34459 /* Reset the PCNET32 */
34460- lp->a.reset(ioaddr);
34461- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34462+ lp->a->reset(ioaddr);
34463+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34464
34465 /* switch pcnet32 to 32bit mode */
34466- lp->a.write_bcr(ioaddr, 20, 2);
34467+ lp->a->write_bcr(ioaddr, 20, 2);
34468
34469 /* purge & init rings but don't actually restart */
34470 pcnet32_restart(dev, 0x0000);
34471
34472- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34473+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34474
34475 /* Initialize Transmit buffers. */
34476 size = data_len + 15;
34477@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34478
34479 /* set int loopback in CSR15 */
34480 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34481- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34482+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34483
34484 teststatus = cpu_to_le16(0x8000);
34485- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34486+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34487
34488 /* Check status of descriptors */
34489 for (x = 0; x < numbuffs; x++) {
34490@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34491 }
34492 }
34493
34494- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34495+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34496 wmb();
34497 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34498 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34499@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34500 pcnet32_restart(dev, CSR0_NORMAL);
34501 } else {
34502 pcnet32_purge_rx_ring(dev);
34503- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34504+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34505 }
34506 spin_unlock_irqrestore(&lp->lock, flags);
34507
34508@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34509 static void pcnet32_led_blink_callback(struct net_device *dev)
34510 {
34511 struct pcnet32_private *lp = netdev_priv(dev);
34512- struct pcnet32_access *a = &lp->a;
34513+ struct pcnet32_access *a = lp->a;
34514 ulong ioaddr = dev->base_addr;
34515 unsigned long flags;
34516 int i;
34517@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34518 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34519 {
34520 struct pcnet32_private *lp = netdev_priv(dev);
34521- struct pcnet32_access *a = &lp->a;
34522+ struct pcnet32_access *a = lp->a;
34523 ulong ioaddr = dev->base_addr;
34524 unsigned long flags;
34525 int i, regs[4];
34526@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34527 {
34528 int csr5;
34529 struct pcnet32_private *lp = netdev_priv(dev);
34530- struct pcnet32_access *a = &lp->a;
34531+ struct pcnet32_access *a = lp->a;
34532 ulong ioaddr = dev->base_addr;
34533 int ticks;
34534
34535@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34536 spin_lock_irqsave(&lp->lock, flags);
34537 if (pcnet32_tx(dev)) {
34538 /* reset the chip to clear the error condition, then restart */
34539- lp->a.reset(ioaddr);
34540- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34541+ lp->a->reset(ioaddr);
34542+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34543 pcnet32_restart(dev, CSR0_START);
34544 netif_wake_queue(dev);
34545 }
34546@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34547 __napi_complete(napi);
34548
34549 /* clear interrupt masks */
34550- val = lp->a.read_csr(ioaddr, CSR3);
34551+ val = lp->a->read_csr(ioaddr, CSR3);
34552 val &= 0x00ff;
34553- lp->a.write_csr(ioaddr, CSR3, val);
34554+ lp->a->write_csr(ioaddr, CSR3, val);
34555
34556 /* Set interrupt enable. */
34557- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34558+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34559
34560 spin_unlock_irqrestore(&lp->lock, flags);
34561 }
34562@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34563 int i, csr0;
34564 u16 *buff = ptr;
34565 struct pcnet32_private *lp = netdev_priv(dev);
34566- struct pcnet32_access *a = &lp->a;
34567+ struct pcnet32_access *a = lp->a;
34568 ulong ioaddr = dev->base_addr;
34569 unsigned long flags;
34570
34571@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34572 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34573 if (lp->phymask & (1 << j)) {
34574 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34575- lp->a.write_bcr(ioaddr, 33,
34576+ lp->a->write_bcr(ioaddr, 33,
34577 (j << 5) | i);
34578- *buff++ = lp->a.read_bcr(ioaddr, 34);
34579+ *buff++ = lp->a->read_bcr(ioaddr, 34);
34580 }
34581 }
34582 }
34583@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34584 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34585 lp->options |= PCNET32_PORT_FD;
34586
34587- lp->a = *a;
34588+ lp->a = a;
34589
34590 /* prior to register_netdev, dev->name is not yet correct */
34591 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34592@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34593 if (lp->mii) {
34594 /* lp->phycount and lp->phymask are set to 0 by memset above */
34595
34596- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34597+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34598 /* scan for PHYs */
34599 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34600 unsigned short id1, id2;
34601@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34602 "Found PHY %04x:%04x at address %d.\n",
34603 id1, id2, i);
34604 }
34605- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34606+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34607 if (lp->phycount > 1) {
34608 lp->options |= PCNET32_PORT_MII;
34609 }
34610@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34611 }
34612
34613 /* Reset the PCNET32 */
34614- lp->a.reset(ioaddr);
34615+ lp->a->reset(ioaddr);
34616
34617 /* switch pcnet32 to 32bit mode */
34618- lp->a.write_bcr(ioaddr, 20, 2);
34619+ lp->a->write_bcr(ioaddr, 20, 2);
34620
34621 if (netif_msg_ifup(lp))
34622 printk(KERN_DEBUG
34623@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34624 (u32) (lp->init_dma_addr));
34625
34626 /* set/reset autoselect bit */
34627- val = lp->a.read_bcr(ioaddr, 2) & ~2;
34628+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
34629 if (lp->options & PCNET32_PORT_ASEL)
34630 val |= 2;
34631- lp->a.write_bcr(ioaddr, 2, val);
34632+ lp->a->write_bcr(ioaddr, 2, val);
34633
34634 /* handle full duplex setting */
34635 if (lp->mii_if.full_duplex) {
34636- val = lp->a.read_bcr(ioaddr, 9) & ~3;
34637+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
34638 if (lp->options & PCNET32_PORT_FD) {
34639 val |= 1;
34640 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34641@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34642 if (lp->chip_version == 0x2627)
34643 val |= 3;
34644 }
34645- lp->a.write_bcr(ioaddr, 9, val);
34646+ lp->a->write_bcr(ioaddr, 9, val);
34647 }
34648
34649 /* set/reset GPSI bit in test register */
34650- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34651+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34652 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34653 val |= 0x10;
34654- lp->a.write_csr(ioaddr, 124, val);
34655+ lp->a->write_csr(ioaddr, 124, val);
34656
34657 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34658 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34659@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34660 * duplex, and/or enable auto negotiation, and clear DANAS
34661 */
34662 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34663- lp->a.write_bcr(ioaddr, 32,
34664- lp->a.read_bcr(ioaddr, 32) | 0x0080);
34665+ lp->a->write_bcr(ioaddr, 32,
34666+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
34667 /* disable Auto Negotiation, set 10Mpbs, HD */
34668- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34669+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34670 if (lp->options & PCNET32_PORT_FD)
34671 val |= 0x10;
34672 if (lp->options & PCNET32_PORT_100)
34673 val |= 0x08;
34674- lp->a.write_bcr(ioaddr, 32, val);
34675+ lp->a->write_bcr(ioaddr, 32, val);
34676 } else {
34677 if (lp->options & PCNET32_PORT_ASEL) {
34678- lp->a.write_bcr(ioaddr, 32,
34679- lp->a.read_bcr(ioaddr,
34680+ lp->a->write_bcr(ioaddr, 32,
34681+ lp->a->read_bcr(ioaddr,
34682 32) | 0x0080);
34683 /* enable auto negotiate, setup, disable fd */
34684- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
34685+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
34686 val |= 0x20;
34687- lp->a.write_bcr(ioaddr, 32, val);
34688+ lp->a->write_bcr(ioaddr, 32, val);
34689 }
34690 }
34691 } else {
34692@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
34693 * There is really no good other way to handle multiple PHYs
34694 * other than turning off all automatics
34695 */
34696- val = lp->a.read_bcr(ioaddr, 2);
34697- lp->a.write_bcr(ioaddr, 2, val & ~2);
34698- val = lp->a.read_bcr(ioaddr, 32);
34699- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34700+ val = lp->a->read_bcr(ioaddr, 2);
34701+ lp->a->write_bcr(ioaddr, 2, val & ~2);
34702+ val = lp->a->read_bcr(ioaddr, 32);
34703+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
34704
34705 if (!(lp->options & PCNET32_PORT_ASEL)) {
34706 /* setup ecmd */
34707@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
34708 ecmd.speed =
34709 lp->
34710 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
34711- bcr9 = lp->a.read_bcr(ioaddr, 9);
34712+ bcr9 = lp->a->read_bcr(ioaddr, 9);
34713
34714 if (lp->options & PCNET32_PORT_FD) {
34715 ecmd.duplex = DUPLEX_FULL;
34716@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
34717 ecmd.duplex = DUPLEX_HALF;
34718 bcr9 |= ~(1 << 0);
34719 }
34720- lp->a.write_bcr(ioaddr, 9, bcr9);
34721+ lp->a->write_bcr(ioaddr, 9, bcr9);
34722 }
34723
34724 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34725@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
34726
34727 #ifdef DO_DXSUFLO
34728 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
34729- val = lp->a.read_csr(ioaddr, CSR3);
34730+ val = lp->a->read_csr(ioaddr, CSR3);
34731 val |= 0x40;
34732- lp->a.write_csr(ioaddr, CSR3, val);
34733+ lp->a->write_csr(ioaddr, CSR3, val);
34734 }
34735 #endif
34736
34737@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
34738 napi_enable(&lp->napi);
34739
34740 /* Re-initialize the PCNET32, and start it when done. */
34741- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34742- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34743+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
34744+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
34745
34746- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34747- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34748+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34749+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34750
34751 netif_start_queue(dev);
34752
34753@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
34754
34755 i = 0;
34756 while (i++ < 100)
34757- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34758+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34759 break;
34760 /*
34761 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
34762 * reports that doing so triggers a bug in the '974.
34763 */
34764- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
34765+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
34766
34767 if (netif_msg_ifup(lp))
34768 printk(KERN_DEBUG
34769 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
34770 dev->name, i,
34771 (u32) (lp->init_dma_addr),
34772- lp->a.read_csr(ioaddr, CSR0));
34773+ lp->a->read_csr(ioaddr, CSR0));
34774
34775 spin_unlock_irqrestore(&lp->lock, flags);
34776
34777@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
34778 * Switch back to 16bit mode to avoid problems with dumb
34779 * DOS packet driver after a warm reboot
34780 */
34781- lp->a.write_bcr(ioaddr, 20, 4);
34782+ lp->a->write_bcr(ioaddr, 20, 4);
34783
34784 err_free_irq:
34785 spin_unlock_irqrestore(&lp->lock, flags);
34786@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
34787
34788 /* wait for stop */
34789 for (i = 0; i < 100; i++)
34790- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
34791+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
34792 break;
34793
34794 if (i >= 100 && netif_msg_drv(lp))
34795@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
34796 return;
34797
34798 /* ReInit Ring */
34799- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
34800+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
34801 i = 0;
34802 while (i++ < 1000)
34803- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
34804+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
34805 break;
34806
34807- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
34808+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
34809 }
34810
34811 static void pcnet32_tx_timeout(struct net_device *dev)
34812@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
34813 if (pcnet32_debug & NETIF_MSG_DRV)
34814 printk(KERN_ERR
34815 "%s: transmit timed out, status %4.4x, resetting.\n",
34816- dev->name, lp->a.read_csr(ioaddr, CSR0));
34817- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34818+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34819+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34820 dev->stats.tx_errors++;
34821 if (netif_msg_tx_err(lp)) {
34822 int i;
34823@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34824 if (netif_msg_tx_queued(lp)) {
34825 printk(KERN_DEBUG
34826 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
34827- dev->name, lp->a.read_csr(ioaddr, CSR0));
34828+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34829 }
34830
34831 /* Default status -- will not enable Successful-TxDone
34832@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
34833 dev->stats.tx_bytes += skb->len;
34834
34835 /* Trigger an immediate send poll. */
34836- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34837+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
34838
34839 dev->trans_start = jiffies;
34840
34841@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
34842
34843 spin_lock(&lp->lock);
34844
34845- csr0 = lp->a.read_csr(ioaddr, CSR0);
34846+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34847 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
34848 if (csr0 == 0xffff) {
34849 break; /* PCMCIA remove happened */
34850 }
34851 /* Acknowledge all of the current interrupt sources ASAP. */
34852- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34853+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
34854
34855 if (netif_msg_intr(lp))
34856 printk(KERN_DEBUG
34857 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
34858- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
34859+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
34860
34861 /* Log misc errors. */
34862 if (csr0 & 0x4000)
34863@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
34864 if (napi_schedule_prep(&lp->napi)) {
34865 u16 val;
34866 /* set interrupt masks */
34867- val = lp->a.read_csr(ioaddr, CSR3);
34868+ val = lp->a->read_csr(ioaddr, CSR3);
34869 val |= 0x5f00;
34870- lp->a.write_csr(ioaddr, CSR3, val);
34871+ lp->a->write_csr(ioaddr, CSR3, val);
34872
34873 __napi_schedule(&lp->napi);
34874 break;
34875 }
34876- csr0 = lp->a.read_csr(ioaddr, CSR0);
34877+ csr0 = lp->a->read_csr(ioaddr, CSR0);
34878 }
34879
34880 if (netif_msg_intr(lp))
34881 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
34882- dev->name, lp->a.read_csr(ioaddr, CSR0));
34883+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34884
34885 spin_unlock(&lp->lock);
34886
34887@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
34888
34889 spin_lock_irqsave(&lp->lock, flags);
34890
34891- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34892+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34893
34894 if (netif_msg_ifdown(lp))
34895 printk(KERN_DEBUG
34896 "%s: Shutting down ethercard, status was %2.2x.\n",
34897- dev->name, lp->a.read_csr(ioaddr, CSR0));
34898+ dev->name, lp->a->read_csr(ioaddr, CSR0));
34899
34900 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
34901- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34902+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34903
34904 /*
34905 * Switch back to 16bit mode to avoid problems with dumb
34906 * DOS packet driver after a warm reboot
34907 */
34908- lp->a.write_bcr(ioaddr, 20, 4);
34909+ lp->a->write_bcr(ioaddr, 20, 4);
34910
34911 spin_unlock_irqrestore(&lp->lock, flags);
34912
34913@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
34914 unsigned long flags;
34915
34916 spin_lock_irqsave(&lp->lock, flags);
34917- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
34918+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
34919 spin_unlock_irqrestore(&lp->lock, flags);
34920
34921 return &dev->stats;
34922@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
34923 if (dev->flags & IFF_ALLMULTI) {
34924 ib->filter[0] = cpu_to_le32(~0U);
34925 ib->filter[1] = cpu_to_le32(~0U);
34926- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34927- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34928- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34929- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34930+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34931+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34932+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34933+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34934 return;
34935 }
34936 /* clear the multicast filter */
34937@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
34938 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34939 }
34940 for (i = 0; i < 4; i++)
34941- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34942+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34943 le16_to_cpu(mcast_table[i]));
34944 return;
34945 }
34946@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
34947
34948 spin_lock_irqsave(&lp->lock, flags);
34949 suspended = pcnet32_suspend(dev, &flags, 0);
34950- csr15 = lp->a.read_csr(ioaddr, CSR15);
34951+ csr15 = lp->a->read_csr(ioaddr, CSR15);
34952 if (dev->flags & IFF_PROMISC) {
34953 /* Log any net taps. */
34954 if (netif_msg_hw(lp))
34955@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
34956 lp->init_block->mode =
34957 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34958 7);
34959- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34960+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34961 } else {
34962 lp->init_block->mode =
34963 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34964- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34965+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34966 pcnet32_load_multicast(dev);
34967 }
34968
34969 if (suspended) {
34970 int csr5;
34971 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34972- csr5 = lp->a.read_csr(ioaddr, CSR5);
34973- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34974+ csr5 = lp->a->read_csr(ioaddr, CSR5);
34975+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34976 } else {
34977- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34978+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34979 pcnet32_restart(dev, CSR0_NORMAL);
34980 netif_wake_queue(dev);
34981 }
34982@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
34983 if (!lp->mii)
34984 return 0;
34985
34986- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34987- val_out = lp->a.read_bcr(ioaddr, 34);
34988+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34989+ val_out = lp->a->read_bcr(ioaddr, 34);
34990
34991 return val_out;
34992 }
34993@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
34994 if (!lp->mii)
34995 return;
34996
34997- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34998- lp->a.write_bcr(ioaddr, 34, val);
34999+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35000+ lp->a->write_bcr(ioaddr, 34, val);
35001 }
35002
35003 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35004@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
35005 curr_link = mii_link_ok(&lp->mii_if);
35006 } else {
35007 ulong ioaddr = dev->base_addr; /* card base I/O address */
35008- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
35009+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
35010 }
35011 if (!curr_link) {
35012 if (prev_link || verbose) {
35013@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
35014 (ecmd.duplex ==
35015 DUPLEX_FULL) ? "full" : "half");
35016 }
35017- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
35018+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
35019 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
35020 if (lp->mii_if.full_duplex)
35021 bcr9 |= (1 << 0);
35022 else
35023 bcr9 &= ~(1 << 0);
35024- lp->a.write_bcr(dev->base_addr, 9, bcr9);
35025+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
35026 }
35027 } else {
35028 if (netif_msg_link(lp))
35029diff -urNp linux-2.6.32.45/drivers/net/tg3.h linux-2.6.32.45/drivers/net/tg3.h
35030--- linux-2.6.32.45/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
35031+++ linux-2.6.32.45/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
35032@@ -95,6 +95,7 @@
35033 #define CHIPREV_ID_5750_A0 0x4000
35034 #define CHIPREV_ID_5750_A1 0x4001
35035 #define CHIPREV_ID_5750_A3 0x4003
35036+#define CHIPREV_ID_5750_C1 0x4201
35037 #define CHIPREV_ID_5750_C2 0x4202
35038 #define CHIPREV_ID_5752_A0_HW 0x5000
35039 #define CHIPREV_ID_5752_A0 0x6000
35040diff -urNp linux-2.6.32.45/drivers/net/tokenring/abyss.c linux-2.6.32.45/drivers/net/tokenring/abyss.c
35041--- linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
35042+++ linux-2.6.32.45/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
35043@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
35044
35045 static int __init abyss_init (void)
35046 {
35047- abyss_netdev_ops = tms380tr_netdev_ops;
35048+ pax_open_kernel();
35049+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35050
35051- abyss_netdev_ops.ndo_open = abyss_open;
35052- abyss_netdev_ops.ndo_stop = abyss_close;
35053+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35054+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35055+ pax_close_kernel();
35056
35057 return pci_register_driver(&abyss_driver);
35058 }
35059diff -urNp linux-2.6.32.45/drivers/net/tokenring/madgemc.c linux-2.6.32.45/drivers/net/tokenring/madgemc.c
35060--- linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
35061+++ linux-2.6.32.45/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
35062@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
35063
35064 static int __init madgemc_init (void)
35065 {
35066- madgemc_netdev_ops = tms380tr_netdev_ops;
35067- madgemc_netdev_ops.ndo_open = madgemc_open;
35068- madgemc_netdev_ops.ndo_stop = madgemc_close;
35069+ pax_open_kernel();
35070+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35071+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35072+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35073+ pax_close_kernel();
35074
35075 return mca_register_driver (&madgemc_driver);
35076 }
35077diff -urNp linux-2.6.32.45/drivers/net/tokenring/proteon.c linux-2.6.32.45/drivers/net/tokenring/proteon.c
35078--- linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
35079+++ linux-2.6.32.45/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
35080@@ -353,9 +353,11 @@ static int __init proteon_init(void)
35081 struct platform_device *pdev;
35082 int i, num = 0, err = 0;
35083
35084- proteon_netdev_ops = tms380tr_netdev_ops;
35085- proteon_netdev_ops.ndo_open = proteon_open;
35086- proteon_netdev_ops.ndo_stop = tms380tr_close;
35087+ pax_open_kernel();
35088+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35089+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35090+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35091+ pax_close_kernel();
35092
35093 err = platform_driver_register(&proteon_driver);
35094 if (err)
35095diff -urNp linux-2.6.32.45/drivers/net/tokenring/skisa.c linux-2.6.32.45/drivers/net/tokenring/skisa.c
35096--- linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
35097+++ linux-2.6.32.45/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
35098@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35099 struct platform_device *pdev;
35100 int i, num = 0, err = 0;
35101
35102- sk_isa_netdev_ops = tms380tr_netdev_ops;
35103- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35104- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35105+ pax_open_kernel();
35106+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35107+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35108+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35109+ pax_close_kernel();
35110
35111 err = platform_driver_register(&sk_isa_driver);
35112 if (err)
35113diff -urNp linux-2.6.32.45/drivers/net/tulip/de2104x.c linux-2.6.32.45/drivers/net/tulip/de2104x.c
35114--- linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
35115+++ linux-2.6.32.45/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
35116@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
35117 struct de_srom_info_leaf *il;
35118 void *bufp;
35119
35120+ pax_track_stack();
35121+
35122 /* download entire eeprom */
35123 for (i = 0; i < DE_EEPROM_WORDS; i++)
35124 ((__le16 *)ee_data)[i] =
35125diff -urNp linux-2.6.32.45/drivers/net/tulip/de4x5.c linux-2.6.32.45/drivers/net/tulip/de4x5.c
35126--- linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
35127+++ linux-2.6.32.45/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
35128@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
35129 for (i=0; i<ETH_ALEN; i++) {
35130 tmp.addr[i] = dev->dev_addr[i];
35131 }
35132- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35133+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35134 break;
35135
35136 case DE4X5_SET_HWADDR: /* Set the hardware address */
35137@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
35138 spin_lock_irqsave(&lp->lock, flags);
35139 memcpy(&statbuf, &lp->pktStats, ioc->len);
35140 spin_unlock_irqrestore(&lp->lock, flags);
35141- if (copy_to_user(ioc->data, &statbuf, ioc->len))
35142+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35143 return -EFAULT;
35144 break;
35145 }
35146diff -urNp linux-2.6.32.45/drivers/net/usb/hso.c linux-2.6.32.45/drivers/net/usb/hso.c
35147--- linux-2.6.32.45/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
35148+++ linux-2.6.32.45/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
35149@@ -71,7 +71,7 @@
35150 #include <asm/byteorder.h>
35151 #include <linux/serial_core.h>
35152 #include <linux/serial.h>
35153-
35154+#include <asm/local.h>
35155
35156 #define DRIVER_VERSION "1.2"
35157 #define MOD_AUTHOR "Option Wireless"
35158@@ -258,7 +258,7 @@ struct hso_serial {
35159
35160 /* from usb_serial_port */
35161 struct tty_struct *tty;
35162- int open_count;
35163+ local_t open_count;
35164 spinlock_t serial_lock;
35165
35166 int (*write_data) (struct hso_serial *serial);
35167@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35168 struct urb *urb;
35169
35170 urb = serial->rx_urb[0];
35171- if (serial->open_count > 0) {
35172+ if (local_read(&serial->open_count) > 0) {
35173 count = put_rxbuf_data(urb, serial);
35174 if (count == -1)
35175 return;
35176@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35177 DUMP1(urb->transfer_buffer, urb->actual_length);
35178
35179 /* Anyone listening? */
35180- if (serial->open_count == 0)
35181+ if (local_read(&serial->open_count) == 0)
35182 return;
35183
35184 if (status == 0) {
35185@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35186 spin_unlock_irq(&serial->serial_lock);
35187
35188 /* check for port already opened, if not set the termios */
35189- serial->open_count++;
35190- if (serial->open_count == 1) {
35191+ if (local_inc_return(&serial->open_count) == 1) {
35192 tty->low_latency = 1;
35193 serial->rx_state = RX_IDLE;
35194 /* Force default termio settings */
35195@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35196 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35197 if (result) {
35198 hso_stop_serial_device(serial->parent);
35199- serial->open_count--;
35200+ local_dec(&serial->open_count);
35201 kref_put(&serial->parent->ref, hso_serial_ref_free);
35202 }
35203 } else {
35204@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35205
35206 /* reset the rts and dtr */
35207 /* do the actual close */
35208- serial->open_count--;
35209+ local_dec(&serial->open_count);
35210
35211- if (serial->open_count <= 0) {
35212- serial->open_count = 0;
35213+ if (local_read(&serial->open_count) <= 0) {
35214+ local_set(&serial->open_count, 0);
35215 spin_lock_irq(&serial->serial_lock);
35216 if (serial->tty == tty) {
35217 serial->tty->driver_data = NULL;
35218@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35219
35220 /* the actual setup */
35221 spin_lock_irqsave(&serial->serial_lock, flags);
35222- if (serial->open_count)
35223+ if (local_read(&serial->open_count))
35224 _hso_serial_set_termios(tty, old);
35225 else
35226 tty->termios = old;
35227@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35228 /* Start all serial ports */
35229 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35230 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35231- if (dev2ser(serial_table[i])->open_count) {
35232+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35233 result =
35234 hso_start_serial_device(serial_table[i], GFP_NOIO);
35235 hso_kick_transmit(dev2ser(serial_table[i]));
35236diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-config.h linux-2.6.32.45/drivers/net/vxge/vxge-config.h
35237--- linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35238+++ linux-2.6.32.45/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35239@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35240 void (*link_down)(struct __vxge_hw_device *devh);
35241 void (*crit_err)(struct __vxge_hw_device *devh,
35242 enum vxge_hw_event type, u64 ext_data);
35243-};
35244+} __no_const;
35245
35246 /*
35247 * struct __vxge_hw_blockpool_entry - Block private data structure
35248diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-main.c linux-2.6.32.45/drivers/net/vxge/vxge-main.c
35249--- linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35250+++ linux-2.6.32.45/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35251@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35252 struct sk_buff *completed[NR_SKB_COMPLETED];
35253 int more;
35254
35255+ pax_track_stack();
35256+
35257 do {
35258 more = 0;
35259 skb_ptr = completed;
35260@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35261 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35262 int index;
35263
35264+ pax_track_stack();
35265+
35266 /*
35267 * Filling
35268 * - itable with bucket numbers
35269diff -urNp linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h
35270--- linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35271+++ linux-2.6.32.45/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35272@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35273 struct vxge_hw_mempool_dma *dma_object,
35274 u32 index,
35275 u32 is_last);
35276-};
35277+} __no_const;
35278
35279 void
35280 __vxge_hw_mempool_destroy(
35281diff -urNp linux-2.6.32.45/drivers/net/wan/cycx_x25.c linux-2.6.32.45/drivers/net/wan/cycx_x25.c
35282--- linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35283+++ linux-2.6.32.45/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35284@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35285 unsigned char hex[1024],
35286 * phex = hex;
35287
35288+ pax_track_stack();
35289+
35290 if (len >= (sizeof(hex) / 2))
35291 len = (sizeof(hex) / 2) - 1;
35292
35293diff -urNp linux-2.6.32.45/drivers/net/wan/hdlc_x25.c linux-2.6.32.45/drivers/net/wan/hdlc_x25.c
35294--- linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35295+++ linux-2.6.32.45/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35296@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35297
35298 static int x25_open(struct net_device *dev)
35299 {
35300- struct lapb_register_struct cb;
35301+ static struct lapb_register_struct cb = {
35302+ .connect_confirmation = x25_connected,
35303+ .connect_indication = x25_connected,
35304+ .disconnect_confirmation = x25_disconnected,
35305+ .disconnect_indication = x25_disconnected,
35306+ .data_indication = x25_data_indication,
35307+ .data_transmit = x25_data_transmit
35308+ };
35309 int result;
35310
35311- cb.connect_confirmation = x25_connected;
35312- cb.connect_indication = x25_connected;
35313- cb.disconnect_confirmation = x25_disconnected;
35314- cb.disconnect_indication = x25_disconnected;
35315- cb.data_indication = x25_data_indication;
35316- cb.data_transmit = x25_data_transmit;
35317-
35318 result = lapb_register(dev, &cb);
35319 if (result != LAPB_OK)
35320 return result;
35321diff -urNp linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c
35322--- linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35323+++ linux-2.6.32.45/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35324@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35325 int do_autopm = 1;
35326 DECLARE_COMPLETION_ONSTACK(notif_completion);
35327
35328+ pax_track_stack();
35329+
35330 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35331 i2400m, ack, ack_size);
35332 BUG_ON(_ack == i2400m->bm_ack_buf);
35333diff -urNp linux-2.6.32.45/drivers/net/wireless/airo.c linux-2.6.32.45/drivers/net/wireless/airo.c
35334--- linux-2.6.32.45/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35335+++ linux-2.6.32.45/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35336@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35337 BSSListElement * loop_net;
35338 BSSListElement * tmp_net;
35339
35340+ pax_track_stack();
35341+
35342 /* Blow away current list of scan results */
35343 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35344 list_move_tail (&loop_net->list, &ai->network_free_list);
35345@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35346 WepKeyRid wkr;
35347 int rc;
35348
35349+ pax_track_stack();
35350+
35351 memset( &mySsid, 0, sizeof( mySsid ) );
35352 kfree (ai->flash);
35353 ai->flash = NULL;
35354@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35355 __le32 *vals = stats.vals;
35356 int len;
35357
35358+ pax_track_stack();
35359+
35360 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35361 return -ENOMEM;
35362 data = (struct proc_data *)file->private_data;
35363@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35364 /* If doLoseSync is not 1, we won't do a Lose Sync */
35365 int doLoseSync = -1;
35366
35367+ pax_track_stack();
35368+
35369 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35370 return -ENOMEM;
35371 data = (struct proc_data *)file->private_data;
35372@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35373 int i;
35374 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35375
35376+ pax_track_stack();
35377+
35378 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35379 if (!qual)
35380 return -ENOMEM;
35381@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35382 CapabilityRid cap_rid;
35383 __le32 *vals = stats_rid.vals;
35384
35385+ pax_track_stack();
35386+
35387 /* Get stats out of the card */
35388 clear_bit(JOB_WSTATS, &local->jobs);
35389 if (local->power.event) {
35390diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c
35391--- linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35392+++ linux-2.6.32.45/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35393@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35394 unsigned int v;
35395 u64 tsf;
35396
35397+ pax_track_stack();
35398+
35399 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35400 len += snprintf(buf+len, sizeof(buf)-len,
35401 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35402@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35403 unsigned int len = 0;
35404 unsigned int i;
35405
35406+ pax_track_stack();
35407+
35408 len += snprintf(buf+len, sizeof(buf)-len,
35409 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35410
35411diff -urNp linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c
35412--- linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35413+++ linux-2.6.32.45/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35414@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35415 char buf[512];
35416 unsigned int len = 0;
35417
35418+ pax_track_stack();
35419+
35420 len += snprintf(buf + len, sizeof(buf) - len,
35421 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35422 len += snprintf(buf + len, sizeof(buf) - len,
35423@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35424 int i;
35425 u8 addr[ETH_ALEN];
35426
35427+ pax_track_stack();
35428+
35429 len += snprintf(buf + len, sizeof(buf) - len,
35430 "primary: %s (%s chan=%d ht=%d)\n",
35431 wiphy_name(sc->pri_wiphy->hw->wiphy),
35432diff -urNp linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c
35433--- linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35434+++ linux-2.6.32.45/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35435@@ -43,7 +43,7 @@ static struct dentry *rootdir;
35436 struct b43_debugfs_fops {
35437 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35438 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35439- struct file_operations fops;
35440+ const struct file_operations fops;
35441 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35442 size_t file_struct_offset;
35443 };
35444diff -urNp linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c
35445--- linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35446+++ linux-2.6.32.45/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35447@@ -44,7 +44,7 @@ static struct dentry *rootdir;
35448 struct b43legacy_debugfs_fops {
35449 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35450 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35451- struct file_operations fops;
35452+ const struct file_operations fops;
35453 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35454 size_t file_struct_offset;
35455 /* Take wl->irq_lock before calling read/write? */
35456diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c
35457--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35458+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35459@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35460 int err;
35461 DECLARE_SSID_BUF(ssid);
35462
35463+ pax_track_stack();
35464+
35465 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35466
35467 if (ssid_len)
35468@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35469 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35470 int err;
35471
35472+ pax_track_stack();
35473+
35474 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35475 idx, keylen, len);
35476
35477diff -urNp linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c
35478--- linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35479+++ linux-2.6.32.45/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35480@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35481 unsigned long flags;
35482 DECLARE_SSID_BUF(ssid);
35483
35484+ pax_track_stack();
35485+
35486 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35487 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35488 print_ssid(ssid, info_element->data, info_element->len),
35489diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c
35490--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35491+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35492@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35493 },
35494 };
35495
35496-static struct iwl_ops iwl1000_ops = {
35497+static const struct iwl_ops iwl1000_ops = {
35498 .ucode = &iwl5000_ucode,
35499 .lib = &iwl1000_lib,
35500 .hcmd = &iwl5000_hcmd,
35501diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c
35502--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35503+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35504@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35505 */
35506 if (iwl3945_mod_params.disable_hw_scan) {
35507 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35508- iwl3945_hw_ops.hw_scan = NULL;
35509+ pax_open_kernel();
35510+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35511+ pax_close_kernel();
35512 }
35513
35514
35515diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c
35516--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35517+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35518@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35519 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35520 };
35521
35522-static struct iwl_ops iwl3945_ops = {
35523+static const struct iwl_ops iwl3945_ops = {
35524 .ucode = &iwl3945_ucode,
35525 .lib = &iwl3945_lib,
35526 .hcmd = &iwl3945_hcmd,
35527diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c
35528--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35529+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35530@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35531 },
35532 };
35533
35534-static struct iwl_ops iwl4965_ops = {
35535+static const struct iwl_ops iwl4965_ops = {
35536 .ucode = &iwl4965_ucode,
35537 .lib = &iwl4965_lib,
35538 .hcmd = &iwl4965_hcmd,
35539diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c
35540--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35541+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35542@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35543 },
35544 };
35545
35546-struct iwl_ops iwl5000_ops = {
35547+const struct iwl_ops iwl5000_ops = {
35548 .ucode = &iwl5000_ucode,
35549 .lib = &iwl5000_lib,
35550 .hcmd = &iwl5000_hcmd,
35551 .utils = &iwl5000_hcmd_utils,
35552 };
35553
35554-static struct iwl_ops iwl5150_ops = {
35555+static const struct iwl_ops iwl5150_ops = {
35556 .ucode = &iwl5000_ucode,
35557 .lib = &iwl5150_lib,
35558 .hcmd = &iwl5000_hcmd,
35559diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c
35560--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35561+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35562@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35563 .calc_rssi = iwl5000_calc_rssi,
35564 };
35565
35566-static struct iwl_ops iwl6000_ops = {
35567+static const struct iwl_ops iwl6000_ops = {
35568 .ucode = &iwl5000_ucode,
35569 .lib = &iwl6000_lib,
35570 .hcmd = &iwl5000_hcmd,
35571diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c
35572--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35573+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35574@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35575 if (iwl_debug_level & IWL_DL_INFO)
35576 dev_printk(KERN_DEBUG, &(pdev->dev),
35577 "Disabling hw_scan\n");
35578- iwl_hw_ops.hw_scan = NULL;
35579+ pax_open_kernel();
35580+ *(void **)&iwl_hw_ops.hw_scan = NULL;
35581+ pax_close_kernel();
35582 }
35583
35584 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35585diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35586--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35587+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35588@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35589 u8 active_index = 0;
35590 s32 tpt = 0;
35591
35592+ pax_track_stack();
35593+
35594 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35595
35596 if (!ieee80211_is_data(hdr->frame_control) ||
35597@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35598 u8 valid_tx_ant = 0;
35599 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35600
35601+ pax_track_stack();
35602+
35603 /* Override starting rate (index 0) if needed for debug purposes */
35604 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35605
35606diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35607--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35608+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35609@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35610 int pos = 0;
35611 const size_t bufsz = sizeof(buf);
35612
35613+ pax_track_stack();
35614+
35615 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35616 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35617 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35618@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35619 const size_t bufsz = sizeof(buf);
35620 ssize_t ret;
35621
35622+ pax_track_stack();
35623+
35624 for (i = 0; i < AC_NUM; i++) {
35625 pos += scnprintf(buf + pos, bufsz - pos,
35626 "\tcw_min\tcw_max\taifsn\ttxop\n");
35627diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h
35628--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35629+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35630@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35631 #endif
35632
35633 #else
35634-#define IWL_DEBUG(__priv, level, fmt, args...)
35635-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35636+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35637+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35638 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35639 void *p, u32 len)
35640 {}
35641diff -urNp linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h
35642--- linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35643+++ linux-2.6.32.45/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35644@@ -68,7 +68,7 @@ struct iwl_tx_queue;
35645
35646 /* shared structures from iwl-5000.c */
35647 extern struct iwl_mod_params iwl50_mod_params;
35648-extern struct iwl_ops iwl5000_ops;
35649+extern const struct iwl_ops iwl5000_ops;
35650 extern struct iwl_ucode_ops iwl5000_ucode;
35651 extern struct iwl_lib_ops iwl5000_lib;
35652 extern struct iwl_hcmd_ops iwl5000_hcmd;
35653diff -urNp linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c
35654--- linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35655+++ linux-2.6.32.45/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35656@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35657 int buf_len = 512;
35658 size_t len = 0;
35659
35660+ pax_track_stack();
35661+
35662 if (*ppos != 0)
35663 return 0;
35664 if (count < sizeof(buf))
35665diff -urNp linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c
35666--- linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35667+++ linux-2.6.32.45/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35668@@ -708,7 +708,7 @@ out_unlock:
35669 struct lbs_debugfs_files {
35670 const char *name;
35671 int perm;
35672- struct file_operations fops;
35673+ const struct file_operations fops;
35674 };
35675
35676 static const struct lbs_debugfs_files debugfs_files[] = {
35677diff -urNp linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c
35678--- linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
35679+++ linux-2.6.32.45/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
35680@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
35681
35682 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
35683
35684- if (rts_threshold < 0 || rts_threshold > 2347)
35685+ if (rts_threshold > 2347)
35686 rts_threshold = 2347;
35687
35688 tmp = cpu_to_le32(rts_threshold);
35689diff -urNp linux-2.6.32.45/drivers/oprofile/buffer_sync.c linux-2.6.32.45/drivers/oprofile/buffer_sync.c
35690--- linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
35691+++ linux-2.6.32.45/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
35692@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
35693 if (cookie == NO_COOKIE)
35694 offset = pc;
35695 if (cookie == INVALID_COOKIE) {
35696- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35697+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35698 offset = pc;
35699 }
35700 if (cookie != last_cookie) {
35701@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
35702 /* add userspace sample */
35703
35704 if (!mm) {
35705- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35706+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35707 return 0;
35708 }
35709
35710 cookie = lookup_dcookie(mm, s->eip, &offset);
35711
35712 if (cookie == INVALID_COOKIE) {
35713- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35714+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35715 return 0;
35716 }
35717
35718@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
35719 /* ignore backtraces if failed to add a sample */
35720 if (state == sb_bt_start) {
35721 state = sb_bt_ignore;
35722- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35723+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35724 }
35725 }
35726 release_mm(mm);
35727diff -urNp linux-2.6.32.45/drivers/oprofile/event_buffer.c linux-2.6.32.45/drivers/oprofile/event_buffer.c
35728--- linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
35729+++ linux-2.6.32.45/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
35730@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
35731 }
35732
35733 if (buffer_pos == buffer_size) {
35734- atomic_inc(&oprofile_stats.event_lost_overflow);
35735+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35736 return;
35737 }
35738
35739diff -urNp linux-2.6.32.45/drivers/oprofile/oprof.c linux-2.6.32.45/drivers/oprofile/oprof.c
35740--- linux-2.6.32.45/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
35741+++ linux-2.6.32.45/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
35742@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
35743 if (oprofile_ops.switch_events())
35744 return;
35745
35746- atomic_inc(&oprofile_stats.multiplex_counter);
35747+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35748 start_switch_worker();
35749 }
35750
35751diff -urNp linux-2.6.32.45/drivers/oprofile/oprofilefs.c linux-2.6.32.45/drivers/oprofile/oprofilefs.c
35752--- linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
35753+++ linux-2.6.32.45/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
35754@@ -187,7 +187,7 @@ static const struct file_operations atom
35755
35756
35757 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35758- char const *name, atomic_t *val)
35759+ char const *name, atomic_unchecked_t *val)
35760 {
35761 struct dentry *d = __oprofilefs_create_file(sb, root, name,
35762 &atomic_ro_fops, 0444);
35763diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.c linux-2.6.32.45/drivers/oprofile/oprofile_stats.c
35764--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
35765+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
35766@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35767 cpu_buf->sample_invalid_eip = 0;
35768 }
35769
35770- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35771- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35772- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35773- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35774- atomic_set(&oprofile_stats.multiplex_counter, 0);
35775+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35776+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35777+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35778+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35779+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35780 }
35781
35782
35783diff -urNp linux-2.6.32.45/drivers/oprofile/oprofile_stats.h linux-2.6.32.45/drivers/oprofile/oprofile_stats.h
35784--- linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
35785+++ linux-2.6.32.45/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
35786@@ -13,11 +13,11 @@
35787 #include <asm/atomic.h>
35788
35789 struct oprofile_stat_struct {
35790- atomic_t sample_lost_no_mm;
35791- atomic_t sample_lost_no_mapping;
35792- atomic_t bt_lost_no_mapping;
35793- atomic_t event_lost_overflow;
35794- atomic_t multiplex_counter;
35795+ atomic_unchecked_t sample_lost_no_mm;
35796+ atomic_unchecked_t sample_lost_no_mapping;
35797+ atomic_unchecked_t bt_lost_no_mapping;
35798+ atomic_unchecked_t event_lost_overflow;
35799+ atomic_unchecked_t multiplex_counter;
35800 };
35801
35802 extern struct oprofile_stat_struct oprofile_stats;
35803diff -urNp linux-2.6.32.45/drivers/parisc/pdc_stable.c linux-2.6.32.45/drivers/parisc/pdc_stable.c
35804--- linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
35805+++ linux-2.6.32.45/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
35806@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
35807 return ret;
35808 }
35809
35810-static struct sysfs_ops pdcspath_attr_ops = {
35811+static const struct sysfs_ops pdcspath_attr_ops = {
35812 .show = pdcspath_attr_show,
35813 .store = pdcspath_attr_store,
35814 };
35815diff -urNp linux-2.6.32.45/drivers/parport/procfs.c linux-2.6.32.45/drivers/parport/procfs.c
35816--- linux-2.6.32.45/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
35817+++ linux-2.6.32.45/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
35818@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
35819
35820 *ppos += len;
35821
35822- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35823+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35824 }
35825
35826 #ifdef CONFIG_PARPORT_1284
35827@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
35828
35829 *ppos += len;
35830
35831- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35832+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35833 }
35834 #endif /* IEEE1284.3 support. */
35835
35836diff -urNp linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c
35837--- linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
35838+++ linux-2.6.32.45/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
35839@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
35840 }
35841
35842
35843-static struct acpi_dock_ops acpiphp_dock_ops = {
35844+static const struct acpi_dock_ops acpiphp_dock_ops = {
35845 .handler = handle_hotplug_event_func,
35846 };
35847
35848diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h
35849--- linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
35850+++ linux-2.6.32.45/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
35851@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35852 int (*hardware_test) (struct slot* slot, u32 value);
35853 u8 (*get_power) (struct slot* slot);
35854 int (*set_power) (struct slot* slot, int value);
35855-};
35856+} __no_const;
35857
35858 struct cpci_hp_controller {
35859 unsigned int irq;
35860diff -urNp linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c
35861--- linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
35862+++ linux-2.6.32.45/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
35863@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
35864
35865 void compaq_nvram_init (void __iomem *rom_start)
35866 {
35867+
35868+#ifndef CONFIG_PAX_KERNEXEC
35869 if (rom_start) {
35870 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35871 }
35872+#endif
35873+
35874 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35875
35876 /* initialize our int15 lock */
35877diff -urNp linux-2.6.32.45/drivers/pci/hotplug/fakephp.c linux-2.6.32.45/drivers/pci/hotplug/fakephp.c
35878--- linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
35879+++ linux-2.6.32.45/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
35880@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
35881 }
35882
35883 static struct kobj_type legacy_ktype = {
35884- .sysfs_ops = &(struct sysfs_ops){
35885+ .sysfs_ops = &(const struct sysfs_ops){
35886 .store = legacy_store, .show = legacy_show
35887 },
35888 .release = &legacy_release,
35889diff -urNp linux-2.6.32.45/drivers/pci/intel-iommu.c linux-2.6.32.45/drivers/pci/intel-iommu.c
35890--- linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
35891+++ linux-2.6.32.45/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
35892@@ -2643,7 +2643,7 @@ error:
35893 return 0;
35894 }
35895
35896-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
35897+dma_addr_t intel_map_page(struct device *dev, struct page *page,
35898 unsigned long offset, size_t size,
35899 enum dma_data_direction dir,
35900 struct dma_attrs *attrs)
35901@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
35902 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
35903 }
35904
35905-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35906+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
35907 size_t size, enum dma_data_direction dir,
35908 struct dma_attrs *attrs)
35909 {
35910@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
35911 }
35912 }
35913
35914-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
35915+void *intel_alloc_coherent(struct device *hwdev, size_t size,
35916 dma_addr_t *dma_handle, gfp_t flags)
35917 {
35918 void *vaddr;
35919@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
35920 return NULL;
35921 }
35922
35923-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35924+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
35925 dma_addr_t dma_handle)
35926 {
35927 int order;
35928@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
35929 free_pages((unsigned long)vaddr, order);
35930 }
35931
35932-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35933+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
35934 int nelems, enum dma_data_direction dir,
35935 struct dma_attrs *attrs)
35936 {
35937@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
35938 return nelems;
35939 }
35940
35941-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35942+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
35943 enum dma_data_direction dir, struct dma_attrs *attrs)
35944 {
35945 int i;
35946@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
35947 return nelems;
35948 }
35949
35950-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35951+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
35952 {
35953 return !dma_addr;
35954 }
35955
35956-struct dma_map_ops intel_dma_ops = {
35957+const struct dma_map_ops intel_dma_ops = {
35958 .alloc_coherent = intel_alloc_coherent,
35959 .free_coherent = intel_free_coherent,
35960 .map_sg = intel_map_sg,
35961diff -urNp linux-2.6.32.45/drivers/pci/pcie/aspm.c linux-2.6.32.45/drivers/pci/pcie/aspm.c
35962--- linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
35963+++ linux-2.6.32.45/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
35964@@ -27,9 +27,9 @@
35965 #define MODULE_PARAM_PREFIX "pcie_aspm."
35966
35967 /* Note: those are not register definitions */
35968-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35969-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35970-#define ASPM_STATE_L1 (4) /* L1 state */
35971+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35972+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35973+#define ASPM_STATE_L1 (4U) /* L1 state */
35974 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35975 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35976
35977diff -urNp linux-2.6.32.45/drivers/pci/probe.c linux-2.6.32.45/drivers/pci/probe.c
35978--- linux-2.6.32.45/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
35979+++ linux-2.6.32.45/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
35980@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
35981 return ret;
35982 }
35983
35984-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
35985+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
35986 struct device_attribute *attr,
35987 char *buf)
35988 {
35989 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
35990 }
35991
35992-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
35993+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
35994 struct device_attribute *attr,
35995 char *buf)
35996 {
35997diff -urNp linux-2.6.32.45/drivers/pci/proc.c linux-2.6.32.45/drivers/pci/proc.c
35998--- linux-2.6.32.45/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
35999+++ linux-2.6.32.45/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
36000@@ -480,7 +480,16 @@ static const struct file_operations proc
36001 static int __init pci_proc_init(void)
36002 {
36003 struct pci_dev *dev = NULL;
36004+
36005+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36006+#ifdef CONFIG_GRKERNSEC_PROC_USER
36007+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36008+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36009+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36010+#endif
36011+#else
36012 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36013+#endif
36014 proc_create("devices", 0, proc_bus_pci_dir,
36015 &proc_bus_pci_dev_operations);
36016 proc_initialized = 1;
36017diff -urNp linux-2.6.32.45/drivers/pci/slot.c linux-2.6.32.45/drivers/pci/slot.c
36018--- linux-2.6.32.45/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
36019+++ linux-2.6.32.45/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
36020@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
36021 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
36022 }
36023
36024-static struct sysfs_ops pci_slot_sysfs_ops = {
36025+static const struct sysfs_ops pci_slot_sysfs_ops = {
36026 .show = pci_slot_attr_show,
36027 .store = pci_slot_attr_store,
36028 };
36029diff -urNp linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c
36030--- linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
36031+++ linux-2.6.32.45/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
36032@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
36033 return -EFAULT;
36034 }
36035 }
36036- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36037+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36038 if (!buf)
36039 return -ENOMEM;
36040
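Switching the ioctl argument buffer from kmalloc() to kzalloc() means fields the handler never fills in are zero rather than stale heap data, so the later copy back to userspace cannot leak kernel memory. Semantically, kzalloc(size, flags) behaves roughly like the sketch below (the real implementation passes __GFP_ZERO to the allocator instead):

static inline void *kzalloc_sketch(size_t size, gfp_t flags)
{
	void *p = kmalloc(size, flags);

	if (p)
		memset(p, 0, size);	/* the only difference from plain kmalloc() */
	return p;
}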
36041diff -urNp linux-2.6.32.45/drivers/platform/x86/acer-wmi.c linux-2.6.32.45/drivers/platform/x86/acer-wmi.c
36042--- linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
36043+++ linux-2.6.32.45/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
36044@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
36045 return 0;
36046 }
36047
36048-static struct backlight_ops acer_bl_ops = {
36049+static const struct backlight_ops acer_bl_ops = {
36050 .get_brightness = read_brightness,
36051 .update_status = update_bl_status,
36052 };
36053diff -urNp linux-2.6.32.45/drivers/platform/x86/asus_acpi.c linux-2.6.32.45/drivers/platform/x86/asus_acpi.c
36054--- linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
36055+++ linux-2.6.32.45/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
36056@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
36057 return 0;
36058 }
36059
36060-static struct backlight_ops asus_backlight_data = {
36061+static const struct backlight_ops asus_backlight_data = {
36062 .get_brightness = read_brightness,
36063 .update_status = set_brightness_status,
36064 };
36065diff -urNp linux-2.6.32.45/drivers/platform/x86/asus-laptop.c linux-2.6.32.45/drivers/platform/x86/asus-laptop.c
36066--- linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
36067+++ linux-2.6.32.45/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
36068@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
36069 */
36070 static int read_brightness(struct backlight_device *bd);
36071 static int update_bl_status(struct backlight_device *bd);
36072-static struct backlight_ops asusbl_ops = {
36073+static const struct backlight_ops asusbl_ops = {
36074 .get_brightness = read_brightness,
36075 .update_status = update_bl_status,
36076 };
36077diff -urNp linux-2.6.32.45/drivers/platform/x86/compal-laptop.c linux-2.6.32.45/drivers/platform/x86/compal-laptop.c
36078--- linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
36079+++ linux-2.6.32.45/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
36080@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
36081 return set_lcd_level(b->props.brightness);
36082 }
36083
36084-static struct backlight_ops compalbl_ops = {
36085+static const struct backlight_ops compalbl_ops = {
36086 .get_brightness = bl_get_brightness,
36087 .update_status = bl_update_status,
36088 };
36089diff -urNp linux-2.6.32.45/drivers/platform/x86/dell-laptop.c linux-2.6.32.45/drivers/platform/x86/dell-laptop.c
36090--- linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
36091+++ linux-2.6.32.45/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
36092@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
36093 return buffer.output[1];
36094 }
36095
36096-static struct backlight_ops dell_ops = {
36097+static const struct backlight_ops dell_ops = {
36098 .get_brightness = dell_get_intensity,
36099 .update_status = dell_send_intensity,
36100 };
36101diff -urNp linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c
36102--- linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
36103+++ linux-2.6.32.45/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
36104@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
36105 */
36106 static int read_brightness(struct backlight_device *bd);
36107 static int update_bl_status(struct backlight_device *bd);
36108-static struct backlight_ops eeepcbl_ops = {
36109+static const struct backlight_ops eeepcbl_ops = {
36110 .get_brightness = read_brightness,
36111 .update_status = update_bl_status,
36112 };
36113diff -urNp linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c
36114--- linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
36115+++ linux-2.6.32.45/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
36116@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
36117 return ret;
36118 }
36119
36120-static struct backlight_ops fujitsubl_ops = {
36121+static const struct backlight_ops fujitsubl_ops = {
36122 .get_brightness = bl_get_brightness,
36123 .update_status = bl_update_status,
36124 };
36125diff -urNp linux-2.6.32.45/drivers/platform/x86/msi-laptop.c linux-2.6.32.45/drivers/platform/x86/msi-laptop.c
36126--- linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
36127+++ linux-2.6.32.45/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
36128@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
36129 return set_lcd_level(b->props.brightness);
36130 }
36131
36132-static struct backlight_ops msibl_ops = {
36133+static const struct backlight_ops msibl_ops = {
36134 .get_brightness = bl_get_brightness,
36135 .update_status = bl_update_status,
36136 };
36137diff -urNp linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c
36138--- linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
36139+++ linux-2.6.32.45/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
36140@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
36141 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
36142 }
36143
36144-static struct backlight_ops pcc_backlight_ops = {
36145+static const struct backlight_ops pcc_backlight_ops = {
36146 .get_brightness = bl_get,
36147 .update_status = bl_set_status,
36148 };
36149diff -urNp linux-2.6.32.45/drivers/platform/x86/sony-laptop.c linux-2.6.32.45/drivers/platform/x86/sony-laptop.c
36150--- linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
36151+++ linux-2.6.32.45/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
36152@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
36153 }
36154
36155 static struct backlight_device *sony_backlight_device;
36156-static struct backlight_ops sony_backlight_ops = {
36157+static const struct backlight_ops sony_backlight_ops = {
36158 .update_status = sony_backlight_update_status,
36159 .get_brightness = sony_backlight_get_brightness,
36160 };
36161diff -urNp linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c
36162--- linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
36163+++ linux-2.6.32.45/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
36164@@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
36165 return 0;
36166 }
36167
36168-void static hotkey_mask_warn_incomplete_mask(void)
36169+static void hotkey_mask_warn_incomplete_mask(void)
36170 {
36171 /* log only what the user can fix... */
36172 const u32 wantedmask = hotkey_driver_mask &
36173@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
36174 BACKLIGHT_UPDATE_HOTKEY);
36175 }
36176
36177-static struct backlight_ops ibm_backlight_data = {
36178+static const struct backlight_ops ibm_backlight_data = {
36179 .get_brightness = brightness_get,
36180 .update_status = brightness_update_status,
36181 };
36182diff -urNp linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c
36183--- linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
36184+++ linux-2.6.32.45/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
36185@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36186 return AE_OK;
36187 }
36188
36189-static struct backlight_ops toshiba_backlight_data = {
36190+static const struct backlight_ops toshiba_backlight_data = {
36191 .get_brightness = get_lcd,
36192 .update_status = set_lcd_status,
36193 };
36194diff -urNp linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c
36195--- linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36196+++ linux-2.6.32.45/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36197@@ -60,7 +60,7 @@ do { \
36198 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36199 } while(0)
36200
36201-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36202+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36203 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36204
36205 /*
36206@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36207
36208 cpu = get_cpu();
36209 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36210+
36211+ pax_open_kernel();
36212 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36213+ pax_close_kernel();
36214
36215 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36216 spin_lock_irqsave(&pnp_bios_lock, flags);
36217@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36218 :"memory");
36219 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36220
36221+ pax_open_kernel();
36222 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36223+ pax_close_kernel();
36224+
36225 put_cpu();
36226
36227 /* If we get here and this is set then the PnP BIOS faulted on us. */
36228@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36229 return status;
36230 }
36231
36232-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36233+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36234 {
36235 int i;
36236
36237@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36238 pnp_bios_callpoint.offset = header->fields.pm16offset;
36239 pnp_bios_callpoint.segment = PNP_CS16;
36240
36241+ pax_open_kernel();
36242+
36243 for_each_possible_cpu(i) {
36244 struct desc_struct *gdt = get_cpu_gdt_table(i);
36245 if (!gdt)
36246@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36247 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36248 (unsigned long)__va(header->fields.pm16dseg));
36249 }
36250+
36251+ pax_close_kernel();
36252 }
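With KERNEXEC/constification in effect, the GDT entries written here are normally read-only, so the patch brackets the two legitimate updates with pax_open_kernel()/pax_close_kernel(), which temporarily permit the write (on x86 essentially by clearing and restoring CR0.WP). On a kernel built without that feature the helpers are expected to compile away; a hedged sketch of such a fallback, not the literal PaX definition:

#ifndef CONFIG_PAX_KERNEXEC
/* fallback sketch: without PaX the helpers cost nothing */
static inline unsigned long pax_open_kernel(void)  { return 0; }
static inline unsigned long pax_close_kernel(void) { return 0; }
#endif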
36253diff -urNp linux-2.6.32.45/drivers/pnp/resource.c linux-2.6.32.45/drivers/pnp/resource.c
36254--- linux-2.6.32.45/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36255+++ linux-2.6.32.45/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36256@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36257 return 1;
36258
36259 /* check if the resource is valid */
36260- if (*irq < 0 || *irq > 15)
36261+ if (*irq > 15)
36262 return 0;
36263
36264 /* check if the resource is reserved */
36265@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36266 return 1;
36267
36268 /* check if the resource is valid */
36269- if (*dma < 0 || *dma == 4 || *dma > 7)
36270+ if (*dma == 4 || *dma > 7)
36271 return 0;
36272
36273 /* check if the resource is reserved */
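The two pnp/resource.c hunks drop the *irq < 0 and *dma < 0 tests because the values being checked are unsigned, so those comparisons can never be true and only generate compiler warnings; the remaining upper-bound checks are what actually validate the resource. For illustration:

/* mirrors the reasoning behind removing the "< 0" tests */
static int irq_in_range(unsigned long irq)
{
	/* "irq < 0" is always false for an unsigned type - dead code */
	return irq <= 15;
}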
36274diff -urNp linux-2.6.32.45/drivers/power/bq27x00_battery.c linux-2.6.32.45/drivers/power/bq27x00_battery.c
36275--- linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36276+++ linux-2.6.32.45/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36277@@ -44,7 +44,7 @@ struct bq27x00_device_info;
36278 struct bq27x00_access_methods {
36279 int (*read)(u8 reg, int *rt_value, int b_single,
36280 struct bq27x00_device_info *di);
36281-};
36282+} __no_const;
36283
36284 struct bq27x00_device_info {
36285 struct device *dev;
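bq27x00_access_methods holds a callback pointer that is legitimately filled in at probe time, so it is tagged __no_const to exempt it from the constify gcc plugin that would otherwise turn such all-function-pointer structures into const objects (the same annotation is applied to adapter_ops, bfa_hwif_s, bfa_ioc_cbfn_s, ips_hw_func_t and isp_operations below). A sketch of how such an annotation is typically wired up - an assumption about the exact form, not a quote of the real header:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* attribute understood by the plugin */
#else
#define __no_const				/* plain GCC: expands to nothing */
#endif

struct hw_methods {
	int (*read)(unsigned char reg, int *val, void *priv);
} __no_const;	/* stays writable even though it only contains function pointers */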
36286diff -urNp linux-2.6.32.45/drivers/rtc/rtc-dev.c linux-2.6.32.45/drivers/rtc/rtc-dev.c
36287--- linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36288+++ linux-2.6.32.45/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36289@@ -14,6 +14,7 @@
36290 #include <linux/module.h>
36291 #include <linux/rtc.h>
36292 #include <linux/sched.h>
36293+#include <linux/grsecurity.h>
36294 #include "rtc-core.h"
36295
36296 static dev_t rtc_devt;
36297@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36298 if (copy_from_user(&tm, uarg, sizeof(tm)))
36299 return -EFAULT;
36300
36301+ gr_log_timechange();
36302+
36303 return rtc_set_time(rtc, &tm);
36304
36305 case RTC_PIE_ON:
36306diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.c linux-2.6.32.45/drivers/s390/cio/qdio_perf.c
36307--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36308+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36309@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36310 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36311 {
36312 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36313- (long)atomic_long_read(&perf_stats.qdio_int));
36314+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36315 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36316- (long)atomic_long_read(&perf_stats.pci_int));
36317+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36318 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36319- (long)atomic_long_read(&perf_stats.thin_int));
36320+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36321 seq_printf(m, "\n");
36322 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36323- (long)atomic_long_read(&perf_stats.tasklet_inbound));
36324+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36325 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36326- (long)atomic_long_read(&perf_stats.tasklet_outbound));
36327+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36328 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36329- (long)atomic_long_read(&perf_stats.tasklet_thinint),
36330- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36331+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36332+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36333 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36334- (long)atomic_long_read(&perf_stats.thinint_inbound),
36335- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36336+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36337+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36338 seq_printf(m, "\n");
36339 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36340- (long)atomic_long_read(&perf_stats.siga_in));
36341+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36342 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36343- (long)atomic_long_read(&perf_stats.siga_out));
36344+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36345 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36346- (long)atomic_long_read(&perf_stats.siga_sync));
36347+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36348 seq_printf(m, "\n");
36349 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36350- (long)atomic_long_read(&perf_stats.inbound_handler));
36351+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36352 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36353- (long)atomic_long_read(&perf_stats.outbound_handler));
36354+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36355 seq_printf(m, "\n");
36356 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36357- (long)atomic_long_read(&perf_stats.fast_requeue));
36358+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36359 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36360- (long)atomic_long_read(&perf_stats.outbound_target_full));
36361+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36362 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36363- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36364+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36365 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36366- (long)atomic_long_read(&perf_stats.debug_stop_polling));
36367+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36368 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36369- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36370+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36371 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36372- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36373- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36374+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36375+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36376 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36377- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36378- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36379+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36380+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36381 seq_printf(m, "\n");
36382 return 0;
36383 }
36384diff -urNp linux-2.6.32.45/drivers/s390/cio/qdio_perf.h linux-2.6.32.45/drivers/s390/cio/qdio_perf.h
36385--- linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36386+++ linux-2.6.32.45/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36387@@ -13,46 +13,46 @@
36388
36389 struct qdio_perf_stats {
36390 /* interrupt handler calls */
36391- atomic_long_t qdio_int;
36392- atomic_long_t pci_int;
36393- atomic_long_t thin_int;
36394+ atomic_long_unchecked_t qdio_int;
36395+ atomic_long_unchecked_t pci_int;
36396+ atomic_long_unchecked_t thin_int;
36397
36398 /* tasklet runs */
36399- atomic_long_t tasklet_inbound;
36400- atomic_long_t tasklet_outbound;
36401- atomic_long_t tasklet_thinint;
36402- atomic_long_t tasklet_thinint_loop;
36403- atomic_long_t thinint_inbound;
36404- atomic_long_t thinint_inbound_loop;
36405- atomic_long_t thinint_inbound_loop2;
36406+ atomic_long_unchecked_t tasklet_inbound;
36407+ atomic_long_unchecked_t tasklet_outbound;
36408+ atomic_long_unchecked_t tasklet_thinint;
36409+ atomic_long_unchecked_t tasklet_thinint_loop;
36410+ atomic_long_unchecked_t thinint_inbound;
36411+ atomic_long_unchecked_t thinint_inbound_loop;
36412+ atomic_long_unchecked_t thinint_inbound_loop2;
36413
36414 /* signal adapter calls */
36415- atomic_long_t siga_out;
36416- atomic_long_t siga_in;
36417- atomic_long_t siga_sync;
36418+ atomic_long_unchecked_t siga_out;
36419+ atomic_long_unchecked_t siga_in;
36420+ atomic_long_unchecked_t siga_sync;
36421
36422 /* misc */
36423- atomic_long_t inbound_handler;
36424- atomic_long_t outbound_handler;
36425- atomic_long_t fast_requeue;
36426- atomic_long_t outbound_target_full;
36427+ atomic_long_unchecked_t inbound_handler;
36428+ atomic_long_unchecked_t outbound_handler;
36429+ atomic_long_unchecked_t fast_requeue;
36430+ atomic_long_unchecked_t outbound_target_full;
36431
36432 /* for debugging */
36433- atomic_long_t debug_tl_out_timer;
36434- atomic_long_t debug_stop_polling;
36435- atomic_long_t debug_eqbs_all;
36436- atomic_long_t debug_eqbs_incomplete;
36437- atomic_long_t debug_sqbs_all;
36438- atomic_long_t debug_sqbs_incomplete;
36439+ atomic_long_unchecked_t debug_tl_out_timer;
36440+ atomic_long_unchecked_t debug_stop_polling;
36441+ atomic_long_unchecked_t debug_eqbs_all;
36442+ atomic_long_unchecked_t debug_eqbs_incomplete;
36443+ atomic_long_unchecked_t debug_sqbs_all;
36444+ atomic_long_unchecked_t debug_sqbs_incomplete;
36445 };
36446
36447 extern struct qdio_perf_stats perf_stats;
36448 extern int qdio_performance_stats;
36449
36450-static inline void qdio_perf_stat_inc(atomic_long_t *count)
36451+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36452 {
36453 if (qdio_performance_stats)
36454- atomic_long_inc(count);
36455+ atomic_long_inc_unchecked(count);
36456 }
36457
36458 int qdio_setup_perf_stats(void);
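The qdio statistics counters are converted to the *_unchecked atomic types: with PAX_REFCOUNT enabled, ordinary atomic_long_t operations are instrumented to detect and stop overflows (aimed at reference-count bugs), and pure statistics counters like these are opted out via the unchecked variants so a wrapping counter does not trigger the protection. When PAX_REFCOUNT is disabled the unchecked types are expected to fall back to the regular ones, roughly as sketched below (an assumption about the shape of the fallback, not the literal definitions):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_long_t atomic_long_unchecked_t;
#define atomic_long_read_unchecked(v)		atomic_long_read(v)
#define atomic_long_set_unchecked(v, i)		atomic_long_set((v), (i))
#define atomic_long_inc_unchecked(v)		atomic_long_inc(v)
#define atomic_long_inc_return_unchecked(v)	atomic_long_inc_return(v)
#endif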
36459diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h
36460--- linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36461+++ linux-2.6.32.45/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36462@@ -471,7 +471,7 @@ struct adapter_ops
36463 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36464 /* Administrative operations */
36465 int (*adapter_comm)(struct aac_dev * dev, int comm);
36466-};
36467+} __no_const;
36468
36469 /*
36470 * Define which interrupt handler needs to be installed
36471diff -urNp linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c
36472--- linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36473+++ linux-2.6.32.45/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36474@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36475 u32 actual_fibsize64, actual_fibsize = 0;
36476 int i;
36477
36478+ pax_track_stack();
36479
36480 if (dev->in_reset) {
36481 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
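The pax_track_stack() calls inserted into aac_send_raw_srb() and the other large-stack SCSI functions below belong to PaX's stack-leak defence: functions with big local frames report how deep they reach so the used portion of the kernel stack can be cleared before returning to userspace instead of lingering as infoleak material. Without the feature the call should cost nothing; a hedged sketch of a no-op fallback (assumed form):

#ifndef CONFIG_PAX_MEMORY_STACKLEAK
#define pax_track_stack() do { } while (0)
#endif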
36482diff -urNp linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c
36483--- linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36484+++ linux-2.6.32.45/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36485@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36486 flash_error_table[i].reason);
36487 }
36488
36489-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36490+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36491 asd_show_update_bios, asd_store_update_bios);
36492
36493 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
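Changing the update_bios attribute from S_IRUGO|S_IWUGO to S_IRUGO|S_IWUSR keeps the sysfs file world-readable but makes it writable by root alone, closing an unprivileged path for triggering a flash update. In octal terms:

/* S_IRUGO = 0444, S_IWUGO = 0222, S_IWUSR = 0200 */
#define OLD_MODE (S_IRUGO | S_IWUGO)	/* 0666: any user may write */
#define NEW_MODE (S_IRUGO | S_IWUSR)	/* 0644: only the owner (root) may write */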
36494diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h
36495--- linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36496+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36497@@ -61,7 +61,7 @@ struct bfa_hwif_s {
36498 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36499 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36500 u32 *nvecs, u32 *maxvec);
36501-};
36502+} __no_const;
36503 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36504
36505 struct bfa_iocfc_s {
36506diff -urNp linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h
36507--- linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36508+++ linux-2.6.32.45/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36509@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36510 bfa_ioc_disable_cbfn_t disable_cbfn;
36511 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36512 bfa_ioc_reset_cbfn_t reset_cbfn;
36513-};
36514+} __no_const;
36515
36516 /**
36517 * Heartbeat failure notification queue element.
36518diff -urNp linux-2.6.32.45/drivers/scsi/BusLogic.c linux-2.6.32.45/drivers/scsi/BusLogic.c
36519--- linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36520+++ linux-2.6.32.45/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36521@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36522 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36523 *PrototypeHostAdapter)
36524 {
36525+ pax_track_stack();
36526+
36527 /*
36528 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36529 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36530diff -urNp linux-2.6.32.45/drivers/scsi/dpt_i2o.c linux-2.6.32.45/drivers/scsi/dpt_i2o.c
36531--- linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36532+++ linux-2.6.32.45/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36533@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36534 dma_addr_t addr;
36535 ulong flags = 0;
36536
36537+ pax_track_stack();
36538+
36539 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36540 // get user msg size in u32s
36541 if(get_user(size, &user_msg[0])){
36542@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36543 s32 rcode;
36544 dma_addr_t addr;
36545
36546+ pax_track_stack();
36547+
36548 memset(msg, 0 , sizeof(msg));
36549 len = scsi_bufflen(cmd);
36550 direction = 0x00000000;
36551diff -urNp linux-2.6.32.45/drivers/scsi/eata.c linux-2.6.32.45/drivers/scsi/eata.c
36552--- linux-2.6.32.45/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36553+++ linux-2.6.32.45/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36554@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36555 struct hostdata *ha;
36556 char name[16];
36557
36558+ pax_track_stack();
36559+
36560 sprintf(name, "%s%d", driver_name, j);
36561
36562 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36563diff -urNp linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c
36564--- linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36565+++ linux-2.6.32.45/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36566@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36567 size_t rlen;
36568 size_t dlen;
36569
36570+ pax_track_stack();
36571+
36572 fiph = (struct fip_header *)skb->data;
36573 sub = fiph->fip_subcode;
36574 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36575diff -urNp linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c
36576--- linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36577+++ linux-2.6.32.45/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36578@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36579 /* Start local port initiatialization */
36580
36581 lp->link_up = 0;
36582- lp->tt = fnic_transport_template;
36583+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36584
36585 lp->max_retry_count = fnic->config.flogi_retries;
36586 lp->max_rport_retry_count = fnic->config.plogi_retries;
36587diff -urNp linux-2.6.32.45/drivers/scsi/gdth.c linux-2.6.32.45/drivers/scsi/gdth.c
36588--- linux-2.6.32.45/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36589+++ linux-2.6.32.45/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36590@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36591 ulong flags;
36592 gdth_ha_str *ha;
36593
36594+ pax_track_stack();
36595+
36596 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36597 return -EFAULT;
36598 ha = gdth_find_ha(ldrv.ionode);
36599@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36600 gdth_ha_str *ha;
36601 int rval;
36602
36603+ pax_track_stack();
36604+
36605 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36606 res.number >= MAX_HDRIVES)
36607 return -EFAULT;
36608@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36609 gdth_ha_str *ha;
36610 int rval;
36611
36612+ pax_track_stack();
36613+
36614 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36615 return -EFAULT;
36616 ha = gdth_find_ha(gen.ionode);
36617@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36618 int i;
36619 gdth_cmd_str gdtcmd;
36620 char cmnd[MAX_COMMAND_SIZE];
36621+
36622+ pax_track_stack();
36623+
36624 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36625
36626 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36627diff -urNp linux-2.6.32.45/drivers/scsi/gdth_proc.c linux-2.6.32.45/drivers/scsi/gdth_proc.c
36628--- linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36629+++ linux-2.6.32.45/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36630@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36631 ulong64 paddr;
36632
36633 char cmnd[MAX_COMMAND_SIZE];
36634+
36635+ pax_track_stack();
36636+
36637 memset(cmnd, 0xff, 12);
36638 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36639
36640@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36641 gdth_hget_str *phg;
36642 char cmnd[MAX_COMMAND_SIZE];
36643
36644+ pax_track_stack();
36645+
36646 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36647 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36648 if (!gdtcmd || !estr)
36649diff -urNp linux-2.6.32.45/drivers/scsi/hosts.c linux-2.6.32.45/drivers/scsi/hosts.c
36650--- linux-2.6.32.45/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36651+++ linux-2.6.32.45/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36652@@ -40,7 +40,7 @@
36653 #include "scsi_logging.h"
36654
36655
36656-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36657+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36658
36659
36660 static void scsi_host_cls_release(struct device *dev)
36661@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36662 * subtract one because we increment first then return, but we need to
36663 * know what the next host number was before increment
36664 */
36665- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36666+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36667 shost->dma_channel = 0xff;
36668
36669 /* These three are default values which can be overridden */
36670diff -urNp linux-2.6.32.45/drivers/scsi/ipr.c linux-2.6.32.45/drivers/scsi/ipr.c
36671--- linux-2.6.32.45/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
36672+++ linux-2.6.32.45/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
36673@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
36674 return true;
36675 }
36676
36677-static struct ata_port_operations ipr_sata_ops = {
36678+static const struct ata_port_operations ipr_sata_ops = {
36679 .phy_reset = ipr_ata_phy_reset,
36680 .hardreset = ipr_sata_reset,
36681 .post_internal_cmd = ipr_ata_post_internal,
36682diff -urNp linux-2.6.32.45/drivers/scsi/ips.h linux-2.6.32.45/drivers/scsi/ips.h
36683--- linux-2.6.32.45/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
36684+++ linux-2.6.32.45/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
36685@@ -1027,7 +1027,7 @@ typedef struct {
36686 int (*intr)(struct ips_ha *);
36687 void (*enableint)(struct ips_ha *);
36688 uint32_t (*statupd)(struct ips_ha *);
36689-} ips_hw_func_t;
36690+} __no_const ips_hw_func_t;
36691
36692 typedef struct ips_ha {
36693 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36694diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c
36695--- linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
36696+++ linux-2.6.32.45/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
36697@@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
36698 struct fc_disc *disc;
36699
36700 if (!lport->tt.disc_start)
36701- lport->tt.disc_start = fc_disc_start;
36702+ *(void **)&lport->tt.disc_start = fc_disc_start;
36703
36704 if (!lport->tt.disc_stop)
36705- lport->tt.disc_stop = fc_disc_stop;
36706+ *(void **)&lport->tt.disc_stop = fc_disc_stop;
36707
36708 if (!lport->tt.disc_stop_final)
36709- lport->tt.disc_stop_final = fc_disc_stop_final;
36710+ *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
36711
36712 if (!lport->tt.disc_recv_req)
36713- lport->tt.disc_recv_req = fc_disc_recv_req;
36714+ *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
36715
36716 disc = &lport->disc;
36717 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
36718diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c
36719--- linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
36720+++ linux-2.6.32.45/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
36721@@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
36722 int fc_elsct_init(struct fc_lport *lport)
36723 {
36724 if (!lport->tt.elsct_send)
36725- lport->tt.elsct_send = fc_elsct_send;
36726+ *(void **)&lport->tt.elsct_send = fc_elsct_send;
36727
36728 return 0;
36729 }
36730diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c
36731--- linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
36732+++ linux-2.6.32.45/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
36733@@ -86,12 +86,12 @@ struct fc_exch_mgr {
36734 * all together if not used XXX
36735 */
36736 struct {
36737- atomic_t no_free_exch;
36738- atomic_t no_free_exch_xid;
36739- atomic_t xid_not_found;
36740- atomic_t xid_busy;
36741- atomic_t seq_not_found;
36742- atomic_t non_bls_resp;
36743+ atomic_unchecked_t no_free_exch;
36744+ atomic_unchecked_t no_free_exch_xid;
36745+ atomic_unchecked_t xid_not_found;
36746+ atomic_unchecked_t xid_busy;
36747+ atomic_unchecked_t seq_not_found;
36748+ atomic_unchecked_t non_bls_resp;
36749 } stats;
36750 };
36751 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
36752@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
36753 /* allocate memory for exchange */
36754 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36755 if (!ep) {
36756- atomic_inc(&mp->stats.no_free_exch);
36757+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36758 goto out;
36759 }
36760 memset(ep, 0, sizeof(*ep));
36761@@ -557,7 +557,7 @@ out:
36762 return ep;
36763 err:
36764 spin_unlock_bh(&pool->lock);
36765- atomic_inc(&mp->stats.no_free_exch_xid);
36766+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36767 mempool_free(ep, mp->ep_pool);
36768 return NULL;
36769 }
36770@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36771 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36772 ep = fc_exch_find(mp, xid);
36773 if (!ep) {
36774- atomic_inc(&mp->stats.xid_not_found);
36775+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36776 reject = FC_RJT_OX_ID;
36777 goto out;
36778 }
36779@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36780 ep = fc_exch_find(mp, xid);
36781 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36782 if (ep) {
36783- atomic_inc(&mp->stats.xid_busy);
36784+ atomic_inc_unchecked(&mp->stats.xid_busy);
36785 reject = FC_RJT_RX_ID;
36786 goto rel;
36787 }
36788@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36789 }
36790 xid = ep->xid; /* get our XID */
36791 } else if (!ep) {
36792- atomic_inc(&mp->stats.xid_not_found);
36793+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36794 reject = FC_RJT_RX_ID; /* XID not found */
36795 goto out;
36796 }
36797@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
36798 } else {
36799 sp = &ep->seq;
36800 if (sp->id != fh->fh_seq_id) {
36801- atomic_inc(&mp->stats.seq_not_found);
36802+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36803 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
36804 goto rel;
36805 }
36806@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
36807
36808 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36809 if (!ep) {
36810- atomic_inc(&mp->stats.xid_not_found);
36811+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36812 goto out;
36813 }
36814 if (ep->esb_stat & ESB_ST_COMPLETE) {
36815- atomic_inc(&mp->stats.xid_not_found);
36816+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36817 goto out;
36818 }
36819 if (ep->rxid == FC_XID_UNKNOWN)
36820 ep->rxid = ntohs(fh->fh_rx_id);
36821 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36822- atomic_inc(&mp->stats.xid_not_found);
36823+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36824 goto rel;
36825 }
36826 if (ep->did != ntoh24(fh->fh_s_id) &&
36827 ep->did != FC_FID_FLOGI) {
36828- atomic_inc(&mp->stats.xid_not_found);
36829+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36830 goto rel;
36831 }
36832 sof = fr_sof(fp);
36833@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
36834 } else {
36835 sp = &ep->seq;
36836 if (sp->id != fh->fh_seq_id) {
36837- atomic_inc(&mp->stats.seq_not_found);
36838+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36839 goto rel;
36840 }
36841 }
36842@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
36843 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36844
36845 if (!sp)
36846- atomic_inc(&mp->stats.xid_not_found);
36847+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36848 else
36849- atomic_inc(&mp->stats.non_bls_resp);
36850+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36851
36852 fc_frame_free(fp);
36853 }
36854@@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
36855 int fc_exch_init(struct fc_lport *lp)
36856 {
36857 if (!lp->tt.seq_start_next)
36858- lp->tt.seq_start_next = fc_seq_start_next;
36859+ *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
36860
36861 if (!lp->tt.exch_seq_send)
36862- lp->tt.exch_seq_send = fc_exch_seq_send;
36863+ *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
36864
36865 if (!lp->tt.seq_send)
36866- lp->tt.seq_send = fc_seq_send;
36867+ *(void **)&lp->tt.seq_send = fc_seq_send;
36868
36869 if (!lp->tt.seq_els_rsp_send)
36870- lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36871+ *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
36872
36873 if (!lp->tt.exch_done)
36874- lp->tt.exch_done = fc_exch_done;
36875+ *(void **)&lp->tt.exch_done = fc_exch_done;
36876
36877 if (!lp->tt.exch_mgr_reset)
36878- lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36879+ *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
36880
36881 if (!lp->tt.seq_exch_abort)
36882- lp->tt.seq_exch_abort = fc_seq_exch_abort;
36883+ *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
36884
36885 /*
36886 * Initialize fc_cpu_mask and fc_cpu_order. The
36887diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c
36888--- linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
36889+++ linux-2.6.32.45/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
36890@@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
36891 struct fc_fcp_internal *si;
36892
36893 if (!lp->tt.fcp_cmd_send)
36894- lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36895+ *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
36896
36897 if (!lp->tt.fcp_cleanup)
36898- lp->tt.fcp_cleanup = fc_fcp_cleanup;
36899+ *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
36900
36901 if (!lp->tt.fcp_abort_io)
36902- lp->tt.fcp_abort_io = fc_fcp_abort_io;
36903+ *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
36904
36905 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
36906 if (!si)
36907diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c
36908--- linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
36909+++ linux-2.6.32.45/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
36910@@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
36911 mutex_lock(&lport->lp_mutex);
36912 lport->state = LPORT_ST_DISABLED;
36913 lport->link_up = 0;
36914- lport->tt.frame_send = fc_frame_drop;
36915+ *(void **)&lport->tt.frame_send = fc_frame_drop;
36916 mutex_unlock(&lport->lp_mutex);
36917
36918 lport->tt.fcp_abort_io(lport);
36919@@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
36920 int fc_lport_init(struct fc_lport *lport)
36921 {
36922 if (!lport->tt.lport_recv)
36923- lport->tt.lport_recv = fc_lport_recv_req;
36924+ *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
36925
36926 if (!lport->tt.lport_reset)
36927- lport->tt.lport_reset = fc_lport_reset;
36928+ *(void **)&lport->tt.lport_reset = fc_lport_reset;
36929
36930 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
36931 fc_host_node_name(lport->host) = lport->wwnn;
36932diff -urNp linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c
36933--- linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
36934+++ linux-2.6.32.45/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
36935@@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
36936 int fc_rport_init(struct fc_lport *lport)
36937 {
36938 if (!lport->tt.rport_lookup)
36939- lport->tt.rport_lookup = fc_rport_lookup;
36940+ *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
36941
36942 if (!lport->tt.rport_create)
36943- lport->tt.rport_create = fc_rport_create;
36944+ *(void **)&lport->tt.rport_create = fc_rport_create;
36945
36946 if (!lport->tt.rport_login)
36947- lport->tt.rport_login = fc_rport_login;
36948+ *(void **)&lport->tt.rport_login = fc_rport_login;
36949
36950 if (!lport->tt.rport_logoff)
36951- lport->tt.rport_logoff = fc_rport_logoff;
36952+ *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
36953
36954 if (!lport->tt.rport_recv_req)
36955- lport->tt.rport_recv_req = fc_rport_recv_req;
36956+ *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
36957
36958 if (!lport->tt.rport_flush_queue)
36959- lport->tt.rport_flush_queue = fc_rport_flush_queue;
36960+ *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
36961
36962 if (!lport->tt.rport_destroy)
36963- lport->tt.rport_destroy = fc_rport_destroy;
36964+ *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
36965
36966 return 0;
36967 }
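The libfc fc_*_init() rewrites above assign the template callbacks through *(void **)& rather than directly, presumably because the corresponding libfc_function_template members become const under this patch's constification and a plain assignment would no longer compile; casting the member's address to void ** gives a writable lvalue at this one initialisation site. A standalone sketch of the cast with hypothetical names (it leans on GCC accepting a function-pointer store through void *, and on the kernel being built with -fno-strict-aliasing, exactly as the patch itself does):

struct template {
	int (* const handler)(int);	/* const member: direct assignment is rejected */
};

static int default_handler(int x) { return x; }

static void template_init(struct template *t)
{
	if (!t->handler)
		/* write through a void ** to bypass the const qualifier on the member */
		*(void **)&t->handler = default_handler;
}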
36968diff -urNp linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c
36969--- linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
36970+++ linux-2.6.32.45/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
36971@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
36972 }
36973 }
36974
36975-static struct ata_port_operations sas_sata_ops = {
36976+static const struct ata_port_operations sas_sata_ops = {
36977 .phy_reset = sas_ata_phy_reset,
36978 .post_internal_cmd = sas_ata_post_internal,
36979 .qc_defer = ata_std_qc_defer,
36980diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c
36981--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
36982+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
36983@@ -124,7 +124,7 @@ struct lpfc_debug {
36984 int len;
36985 };
36986
36987-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36988+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36989 static unsigned long lpfc_debugfs_start_time = 0L;
36990
36991 /**
36992@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
36993 lpfc_debugfs_enable = 0;
36994
36995 len = 0;
36996- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36997+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36998 (lpfc_debugfs_max_disc_trc - 1);
36999 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37000 dtp = vport->disc_trc + i;
37001@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
37002 lpfc_debugfs_enable = 0;
37003
37004 len = 0;
37005- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37006+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37007 (lpfc_debugfs_max_slow_ring_trc - 1);
37008 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37009 dtp = phba->slow_ring_trc + i;
37010@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
37011 uint32_t *ptr;
37012 char buffer[1024];
37013
37014+ pax_track_stack();
37015+
37016 off = 0;
37017 spin_lock_irq(&phba->hbalock);
37018
37019@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
37020 !vport || !vport->disc_trc)
37021 return;
37022
37023- index = atomic_inc_return(&vport->disc_trc_cnt) &
37024+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37025 (lpfc_debugfs_max_disc_trc - 1);
37026 dtp = vport->disc_trc + index;
37027 dtp->fmt = fmt;
37028 dtp->data1 = data1;
37029 dtp->data2 = data2;
37030 dtp->data3 = data3;
37031- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37032+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37033 dtp->jif = jiffies;
37034 #endif
37035 return;
37036@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
37037 !phba || !phba->slow_ring_trc)
37038 return;
37039
37040- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37041+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37042 (lpfc_debugfs_max_slow_ring_trc - 1);
37043 dtp = phba->slow_ring_trc + index;
37044 dtp->fmt = fmt;
37045 dtp->data1 = data1;
37046 dtp->data2 = data2;
37047 dtp->data3 = data3;
37048- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37049+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37050 dtp->jif = jiffies;
37051 #endif
37052 return;
37053@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37054 "slow_ring buffer\n");
37055 goto debug_failed;
37056 }
37057- atomic_set(&phba->slow_ring_trc_cnt, 0);
37058+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37059 memset(phba->slow_ring_trc, 0,
37060 (sizeof(struct lpfc_debugfs_trc) *
37061 lpfc_debugfs_max_slow_ring_trc));
37062@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37063 "buffer\n");
37064 goto debug_failed;
37065 }
37066- atomic_set(&vport->disc_trc_cnt, 0);
37067+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37068
37069 snprintf(name, sizeof(name), "discovery_trace");
37070 vport->debug_disc_trc =
37071diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h
37072--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
37073+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
37074@@ -400,7 +400,7 @@ struct lpfc_vport {
37075 struct dentry *debug_nodelist;
37076 struct dentry *vport_debugfs_root;
37077 struct lpfc_debugfs_trc *disc_trc;
37078- atomic_t disc_trc_cnt;
37079+ atomic_unchecked_t disc_trc_cnt;
37080 #endif
37081 uint8_t stat_data_enabled;
37082 uint8_t stat_data_blocked;
37083@@ -725,8 +725,8 @@ struct lpfc_hba {
37084 struct timer_list fabric_block_timer;
37085 unsigned long bit_flags;
37086 #define FABRIC_COMANDS_BLOCKED 0
37087- atomic_t num_rsrc_err;
37088- atomic_t num_cmd_success;
37089+ atomic_unchecked_t num_rsrc_err;
37090+ atomic_unchecked_t num_cmd_success;
37091 unsigned long last_rsrc_error_time;
37092 unsigned long last_ramp_down_time;
37093 unsigned long last_ramp_up_time;
37094@@ -740,7 +740,7 @@ struct lpfc_hba {
37095 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
37096 struct dentry *debug_slow_ring_trc;
37097 struct lpfc_debugfs_trc *slow_ring_trc;
37098- atomic_t slow_ring_trc_cnt;
37099+ atomic_unchecked_t slow_ring_trc_cnt;
37100 #endif
37101
37102 /* Used for deferred freeing of ELS data buffers */
37103diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c
37104--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
37105+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
37106@@ -8021,8 +8021,10 @@ lpfc_init(void)
37107 printk(LPFC_COPYRIGHT "\n");
37108
37109 if (lpfc_enable_npiv) {
37110- lpfc_transport_functions.vport_create = lpfc_vport_create;
37111- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37112+ pax_open_kernel();
37113+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37114+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37115+ pax_close_kernel();
37116 }
37117 lpfc_transport_template =
37118 fc_attach_transport(&lpfc_transport_functions);
37119diff -urNp linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c
37120--- linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
37121+++ linux-2.6.32.45/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
37122@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
37123 uint32_t evt_posted;
37124
37125 spin_lock_irqsave(&phba->hbalock, flags);
37126- atomic_inc(&phba->num_rsrc_err);
37127+ atomic_inc_unchecked(&phba->num_rsrc_err);
37128 phba->last_rsrc_error_time = jiffies;
37129
37130 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37131@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
37132 unsigned long flags;
37133 struct lpfc_hba *phba = vport->phba;
37134 uint32_t evt_posted;
37135- atomic_inc(&phba->num_cmd_success);
37136+ atomic_inc_unchecked(&phba->num_cmd_success);
37137
37138 if (vport->cfg_lun_queue_depth <= queue_depth)
37139 return;
37140@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37141 int i;
37142 struct lpfc_rport_data *rdata;
37143
37144- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37145- num_cmd_success = atomic_read(&phba->num_cmd_success);
37146+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37147+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37148
37149 vports = lpfc_create_vport_work_array(phba);
37150 if (vports != NULL)
37151@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37152 }
37153 }
37154 lpfc_destroy_vport_work_array(phba, vports);
37155- atomic_set(&phba->num_rsrc_err, 0);
37156- atomic_set(&phba->num_cmd_success, 0);
37157+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37158+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37159 }
37160
37161 /**
37162@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37163 }
37164 }
37165 lpfc_destroy_vport_work_array(phba, vports);
37166- atomic_set(&phba->num_rsrc_err, 0);
37167- atomic_set(&phba->num_cmd_success, 0);
37168+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37169+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37170 }
37171
37172 /**
37173diff -urNp linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c
37174--- linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
37175+++ linux-2.6.32.45/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
37176@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37177 int rval;
37178 int i;
37179
37180+ pax_track_stack();
37181+
37182 // Allocate memory for the base list of scb for management module.
37183 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37184
37185diff -urNp linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c
37186--- linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
37187+++ linux-2.6.32.45/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
37188@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37189 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37190 int ret;
37191
37192+ pax_track_stack();
37193+
37194 or = osd_start_request(od, GFP_KERNEL);
37195 if (!or)
37196 return -ENOMEM;
37197diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.c linux-2.6.32.45/drivers/scsi/pmcraid.c
37198--- linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:35:29.000000000 -0400
37199+++ linux-2.6.32.45/drivers/scsi/pmcraid.c 2011-08-09 18:33:59.000000000 -0400
37200@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37201 res->scsi_dev = scsi_dev;
37202 scsi_dev->hostdata = res;
37203 res->change_detected = 0;
37204- atomic_set(&res->read_failures, 0);
37205- atomic_set(&res->write_failures, 0);
37206+ atomic_set_unchecked(&res->read_failures, 0);
37207+ atomic_set_unchecked(&res->write_failures, 0);
37208 rc = 0;
37209 }
37210 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37211@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37212
37213 /* If this was a SCSI read/write command keep count of errors */
37214 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37215- atomic_inc(&res->read_failures);
37216+ atomic_inc_unchecked(&res->read_failures);
37217 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37218- atomic_inc(&res->write_failures);
37219+ atomic_inc_unchecked(&res->write_failures);
37220
37221 if (!RES_IS_GSCSI(res->cfg_entry) &&
37222 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37223@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru
37224
37225 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37226 /* add resources only after host is added into system */
37227- if (!atomic_read(&pinstance->expose_resources))
37228+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37229 return;
37230
37231 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37232@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan
37233 init_waitqueue_head(&pinstance->reset_wait_q);
37234
37235 atomic_set(&pinstance->outstanding_cmds, 0);
37236- atomic_set(&pinstance->expose_resources, 0);
37237+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37238
37239 INIT_LIST_HEAD(&pinstance->free_res_q);
37240 INIT_LIST_HEAD(&pinstance->used_res_q);
37241@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
37242 /* Schedule worker thread to handle CCN and take care of adding and
37243 * removing devices to OS
37244 */
37245- atomic_set(&pinstance->expose_resources, 1);
37246+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37247 schedule_work(&pinstance->worker_q);
37248 return rc;
37249
37250diff -urNp linux-2.6.32.45/drivers/scsi/pmcraid.h linux-2.6.32.45/drivers/scsi/pmcraid.h
37251--- linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37252+++ linux-2.6.32.45/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37253@@ -690,7 +690,7 @@ struct pmcraid_instance {
37254 atomic_t outstanding_cmds;
37255
37256 /* should add/delete resources to mid-layer now ?*/
37257- atomic_t expose_resources;
37258+ atomic_unchecked_t expose_resources;
37259
37260 /* Tasklet to handle deferred processing */
37261 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37262@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37263 struct list_head queue; /* link to "to be exposed" resources */
37264 struct pmcraid_config_table_entry cfg_entry;
37265 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37266- atomic_t read_failures; /* count of failed READ commands */
37267- atomic_t write_failures; /* count of failed WRITE commands */
37268+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37269+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37270
37271 /* To indicate add/delete/modify during CCN */
37272 u8 change_detected;
37273diff -urNp linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h
37274--- linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37275+++ linux-2.6.32.45/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37276@@ -2089,7 +2089,7 @@ struct isp_operations {
37277
37278 int (*get_flash_version) (struct scsi_qla_host *, void *);
37279 int (*start_scsi) (srb_t *);
37280-};
37281+} __no_const;
37282
37283 /* MSI-X Support *************************************************************/
37284
37285diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h
37286--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37287+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37288@@ -240,7 +240,7 @@ struct ddb_entry {
37289 atomic_t retry_relogin_timer; /* Min Time between relogins
37290 * (4000 only) */
37291 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37292- atomic_t relogin_retry_count; /* Num of times relogin has been
37293+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37294 * retried */
37295
37296 uint16_t port;
37297diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c
37298--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37299+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37300@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37301 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37302 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37303 atomic_set(&ddb_entry->relogin_timer, 0);
37304- atomic_set(&ddb_entry->relogin_retry_count, 0);
37305+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37306 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37307 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37308 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37309@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37310 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37311 atomic_set(&ddb_entry->port_down_timer,
37312 ha->port_down_retry_count);
37313- atomic_set(&ddb_entry->relogin_retry_count, 0);
37314+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37315 atomic_set(&ddb_entry->relogin_timer, 0);
37316 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37317 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37318diff -urNp linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c
37319--- linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37320+++ linux-2.6.32.45/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37321@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37322 ddb_entry->fw_ddb_device_state ==
37323 DDB_DS_SESSION_FAILED) {
37324 /* Reset retry relogin timer */
37325- atomic_inc(&ddb_entry->relogin_retry_count);
37326+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37327 DEBUG2(printk("scsi%ld: index[%d] relogin"
37328 " timed out-retrying"
37329 " relogin (%d)\n",
37330 ha->host_no,
37331 ddb_entry->fw_ddb_index,
37332- atomic_read(&ddb_entry->
37333+ atomic_read_unchecked(&ddb_entry->
37334 relogin_retry_count))
37335 );
37336 start_dpc++;
37337diff -urNp linux-2.6.32.45/drivers/scsi/scsi.c linux-2.6.32.45/drivers/scsi/scsi.c
37338--- linux-2.6.32.45/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37339+++ linux-2.6.32.45/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37340@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37341 unsigned long timeout;
37342 int rtn = 0;
37343
37344- atomic_inc(&cmd->device->iorequest_cnt);
37345+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37346
37347 /* check if the device is still usable */
37348 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37349diff -urNp linux-2.6.32.45/drivers/scsi/scsi_debug.c linux-2.6.32.45/drivers/scsi/scsi_debug.c
37350--- linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37351+++ linux-2.6.32.45/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37352@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37353 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37354 unsigned char *cmd = (unsigned char *)scp->cmnd;
37355
37356+ pax_track_stack();
37357+
37358 if ((errsts = check_readiness(scp, 1, devip)))
37359 return errsts;
37360 memset(arr, 0, sizeof(arr));
37361@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37362 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37363 unsigned char *cmd = (unsigned char *)scp->cmnd;
37364
37365+ pax_track_stack();
37366+
37367 if ((errsts = check_readiness(scp, 1, devip)))
37368 return errsts;
37369 memset(arr, 0, sizeof(arr));
37370diff -urNp linux-2.6.32.45/drivers/scsi/scsi_lib.c linux-2.6.32.45/drivers/scsi/scsi_lib.c
37371--- linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37372+++ linux-2.6.32.45/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37373@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37374
37375 scsi_init_cmd_errh(cmd);
37376 cmd->result = DID_NO_CONNECT << 16;
37377- atomic_inc(&cmd->device->iorequest_cnt);
37378+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37379
37380 /*
37381 * SCSI request completion path will do scsi_device_unbusy(),
37382@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37383 */
37384 cmd->serial_number = 0;
37385
37386- atomic_inc(&cmd->device->iodone_cnt);
37387+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37388 if (cmd->result)
37389- atomic_inc(&cmd->device->ioerr_cnt);
37390+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37391
37392 disposition = scsi_decide_disposition(cmd);
37393 if (disposition != SUCCESS &&
37394diff -urNp linux-2.6.32.45/drivers/scsi/scsi_sysfs.c linux-2.6.32.45/drivers/scsi/scsi_sysfs.c
37395--- linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37396+++ linux-2.6.32.45/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37397@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37398 char *buf) \
37399 { \
37400 struct scsi_device *sdev = to_scsi_device(dev); \
37401- unsigned long long count = atomic_read(&sdev->field); \
37402+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37403 return snprintf(buf, 20, "0x%llx\n", count); \
37404 } \
37405 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37406diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c
37407--- linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37408+++ linux-2.6.32.45/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37409@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37410 * Netlink Infrastructure
37411 */
37412
37413-static atomic_t fc_event_seq;
37414+static atomic_unchecked_t fc_event_seq;
37415
37416 /**
37417 * fc_get_event_number - Obtain the next sequential FC event number
37418@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37419 u32
37420 fc_get_event_number(void)
37421 {
37422- return atomic_add_return(1, &fc_event_seq);
37423+ return atomic_add_return_unchecked(1, &fc_event_seq);
37424 }
37425 EXPORT_SYMBOL(fc_get_event_number);
37426
37427@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37428 {
37429 int error;
37430
37431- atomic_set(&fc_event_seq, 0);
37432+ atomic_set_unchecked(&fc_event_seq, 0);
37433
37434 error = transport_class_register(&fc_host_class);
37435 if (error)
37436diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c
37437--- linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37438+++ linux-2.6.32.45/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37439@@ -81,7 +81,7 @@ struct iscsi_internal {
37440 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37441 };
37442
37443-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37444+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37445 static struct workqueue_struct *iscsi_eh_timer_workq;
37446
37447 /*
37448@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37449 int err;
37450
37451 ihost = shost->shost_data;
37452- session->sid = atomic_add_return(1, &iscsi_session_nr);
37453+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37454
37455 if (id == ISCSI_MAX_TARGET) {
37456 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37457@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37458 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37459 ISCSI_TRANSPORT_VERSION);
37460
37461- atomic_set(&iscsi_session_nr, 0);
37462+ atomic_set_unchecked(&iscsi_session_nr, 0);
37463
37464 err = class_register(&iscsi_transport_class);
37465 if (err)
37466diff -urNp linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c
37467--- linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37468+++ linux-2.6.32.45/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37469@@ -33,7 +33,7 @@
37470 #include "scsi_transport_srp_internal.h"
37471
37472 struct srp_host_attrs {
37473- atomic_t next_port_id;
37474+ atomic_unchecked_t next_port_id;
37475 };
37476 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37477
37478@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37479 struct Scsi_Host *shost = dev_to_shost(dev);
37480 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37481
37482- atomic_set(&srp_host->next_port_id, 0);
37483+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37484 return 0;
37485 }
37486
37487@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37488 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37489 rport->roles = ids->roles;
37490
37491- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37492+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37493 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37494
37495 transport_setup_device(&rport->dev);
37496diff -urNp linux-2.6.32.45/drivers/scsi/sg.c linux-2.6.32.45/drivers/scsi/sg.c
37497--- linux-2.6.32.45/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37498+++ linux-2.6.32.45/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37499@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37500 const struct file_operations * fops;
37501 };
37502
37503-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37504+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37505 {"allow_dio", &adio_fops},
37506 {"debug", &debug_fops},
37507 {"def_reserved_size", &dressz_fops},
37508@@ -2307,7 +2307,7 @@ sg_proc_init(void)
37509 {
37510 int k, mask;
37511 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37512- struct sg_proc_leaf * leaf;
37513+ const struct sg_proc_leaf * leaf;
37514
37515 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37516 if (!sg_proc_sgp)
37517diff -urNp linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c
37518--- linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37519+++ linux-2.6.32.45/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37520@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37521 int do_iounmap = 0;
37522 int do_disable_device = 1;
37523
37524+ pax_track_stack();
37525+
37526 memset(&sym_dev, 0, sizeof(sym_dev));
37527 memset(&nvram, 0, sizeof(nvram));
37528 sym_dev.pdev = pdev;
37529diff -urNp linux-2.6.32.45/drivers/serial/kgdboc.c linux-2.6.32.45/drivers/serial/kgdboc.c
37530--- linux-2.6.32.45/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37531+++ linux-2.6.32.45/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37532@@ -18,7 +18,7 @@
37533
37534 #define MAX_CONFIG_LEN 40
37535
37536-static struct kgdb_io kgdboc_io_ops;
37537+static const struct kgdb_io kgdboc_io_ops;
37538
37539 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37540 static int configured = -1;
37541@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37542 module_put(THIS_MODULE);
37543 }
37544
37545-static struct kgdb_io kgdboc_io_ops = {
37546+static const struct kgdb_io kgdboc_io_ops = {
37547 .name = "kgdboc",
37548 .read_char = kgdboc_get_char,
37549 .write_char = kgdboc_put_char,
37550diff -urNp linux-2.6.32.45/drivers/spi/spi.c linux-2.6.32.45/drivers/spi/spi.c
37551--- linux-2.6.32.45/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37552+++ linux-2.6.32.45/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37553@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37554 EXPORT_SYMBOL_GPL(spi_sync);
37555
37556 /* portable code must never pass more than 32 bytes */
37557-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37558+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37559
37560 static u8 *buf;
37561
37562diff -urNp linux-2.6.32.45/drivers/ssb/driver_gige.c linux-2.6.32.45/drivers/ssb/driver_gige.c
37563--- linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37564+++ linux-2.6.32.45/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37565@@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37566 dev->pci_controller.io_resource = &dev->io_resource;
37567 dev->pci_controller.mem_resource = &dev->mem_resource;
37568 dev->pci_controller.io_map_base = 0x800;
37569- dev->pci_ops.read = ssb_gige_pci_read_config;
37570- dev->pci_ops.write = ssb_gige_pci_write_config;
37571+ *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37572+ *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37573
37574 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37575 dev->io_resource.start = 0x800;
37576diff -urNp linux-2.6.32.45/drivers/staging/android/binder.c linux-2.6.32.45/drivers/staging/android/binder.c
37577--- linux-2.6.32.45/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37578+++ linux-2.6.32.45/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37579@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37580 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37581 }
37582
37583-static struct vm_operations_struct binder_vm_ops = {
37584+static const struct vm_operations_struct binder_vm_ops = {
37585 .open = binder_vma_open,
37586 .close = binder_vma_close,
37587 };
37588diff -urNp linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c
37589--- linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37590+++ linux-2.6.32.45/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37591@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37592 return VM_FAULT_NOPAGE;
37593 }
37594
37595-static struct vm_operations_struct b3dfg_vm_ops = {
37596+static const struct vm_operations_struct b3dfg_vm_ops = {
37597 .fault = b3dfg_vma_fault,
37598 };
37599
37600@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37601 return r;
37602 }
37603
37604-static struct file_operations b3dfg_fops = {
37605+static const struct file_operations b3dfg_fops = {
37606 .owner = THIS_MODULE,
37607 .open = b3dfg_open,
37608 .release = b3dfg_release,
37609diff -urNp linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c
37610--- linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:35:29.000000000 -0400
37611+++ linux-2.6.32.45/drivers/staging/comedi/comedi_fops.c 2011-08-09 18:34:00.000000000 -0400
37612@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37613 mutex_unlock(&dev->mutex);
37614 }
37615
37616-static struct vm_operations_struct comedi_vm_ops = {
37617+static const struct vm_operations_struct comedi_vm_ops = {
37618 .close = comedi_unmap,
37619 };
37620
37621diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c
37622--- linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37623+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37624@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37625 static dev_t adsp_devno;
37626 static struct class *adsp_class;
37627
37628-static struct file_operations adsp_fops = {
37629+static const struct file_operations adsp_fops = {
37630 .owner = THIS_MODULE,
37631 .open = adsp_open,
37632 .unlocked_ioctl = adsp_ioctl,
37633diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c
37634--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37635+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37636@@ -1022,7 +1022,7 @@ done:
37637 return rc;
37638 }
37639
37640-static struct file_operations audio_aac_fops = {
37641+static const struct file_operations audio_aac_fops = {
37642 .owner = THIS_MODULE,
37643 .open = audio_open,
37644 .release = audio_release,
37645diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c
37646--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37647+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37648@@ -833,7 +833,7 @@ done:
37649 return rc;
37650 }
37651
37652-static struct file_operations audio_amrnb_fops = {
37653+static const struct file_operations audio_amrnb_fops = {
37654 .owner = THIS_MODULE,
37655 .open = audamrnb_open,
37656 .release = audamrnb_release,
37657diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c
37658--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37659+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37660@@ -805,7 +805,7 @@ dma_fail:
37661 return rc;
37662 }
37663
37664-static struct file_operations audio_evrc_fops = {
37665+static const struct file_operations audio_evrc_fops = {
37666 .owner = THIS_MODULE,
37667 .open = audevrc_open,
37668 .release = audevrc_release,
37669diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c
37670--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
37671+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
37672@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
37673 return 0;
37674 }
37675
37676-static struct file_operations audio_fops = {
37677+static const struct file_operations audio_fops = {
37678 .owner = THIS_MODULE,
37679 .open = audio_in_open,
37680 .release = audio_in_release,
37681@@ -922,7 +922,7 @@ static struct file_operations audio_fops
37682 .unlocked_ioctl = audio_in_ioctl,
37683 };
37684
37685-static struct file_operations audpre_fops = {
37686+static const struct file_operations audpre_fops = {
37687 .owner = THIS_MODULE,
37688 .open = audpre_open,
37689 .unlocked_ioctl = audpre_ioctl,
37690diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c
37691--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
37692+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
37693@@ -941,7 +941,7 @@ done:
37694 return rc;
37695 }
37696
37697-static struct file_operations audio_mp3_fops = {
37698+static const struct file_operations audio_mp3_fops = {
37699 .owner = THIS_MODULE,
37700 .open = audio_open,
37701 .release = audio_release,
37702diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c
37703--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
37704+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
37705@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
37706 return 0;
37707 }
37708
37709-static struct file_operations audio_fops = {
37710+static const struct file_operations audio_fops = {
37711 .owner = THIS_MODULE,
37712 .open = audio_open,
37713 .release = audio_release,
37714@@ -819,7 +819,7 @@ static struct file_operations audio_fops
37715 .unlocked_ioctl = audio_ioctl,
37716 };
37717
37718-static struct file_operations audpp_fops = {
37719+static const struct file_operations audpp_fops = {
37720 .owner = THIS_MODULE,
37721 .open = audpp_open,
37722 .unlocked_ioctl = audpp_ioctl,
37723diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c
37724--- linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
37725+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
37726@@ -816,7 +816,7 @@ err:
37727 return rc;
37728 }
37729
37730-static struct file_operations audio_qcelp_fops = {
37731+static const struct file_operations audio_qcelp_fops = {
37732 .owner = THIS_MODULE,
37733 .open = audqcelp_open,
37734 .release = audqcelp_release,
37735diff -urNp linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c
37736--- linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
37737+++ linux-2.6.32.45/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
37738@@ -242,7 +242,7 @@ err:
37739 return rc;
37740 }
37741
37742-static struct file_operations snd_fops = {
37743+static const struct file_operations snd_fops = {
37744 .owner = THIS_MODULE,
37745 .open = snd_open,
37746 .release = snd_release,
37747diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c
37748--- linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
37749+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
37750@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
37751 return 0;
37752 }
37753
37754-static struct file_operations qmi_fops = {
37755+static const struct file_operations qmi_fops = {
37756 .owner = THIS_MODULE,
37757 .read = qmi_read,
37758 .write = qmi_write,
37759diff -urNp linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c
37760--- linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
37761+++ linux-2.6.32.45/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
37762@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
37763 return rc;
37764 }
37765
37766-static struct file_operations rpcrouter_server_fops = {
37767+static const struct file_operations rpcrouter_server_fops = {
37768 .owner = THIS_MODULE,
37769 .open = rpcrouter_open,
37770 .release = rpcrouter_release,
37771@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
37772 .unlocked_ioctl = rpcrouter_ioctl,
37773 };
37774
37775-static struct file_operations rpcrouter_router_fops = {
37776+static const struct file_operations rpcrouter_router_fops = {
37777 .owner = THIS_MODULE,
37778 .open = rpcrouter_open,
37779 .release = rpcrouter_release,
37780diff -urNp linux-2.6.32.45/drivers/staging/dst/dcore.c linux-2.6.32.45/drivers/staging/dst/dcore.c
37781--- linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
37782+++ linux-2.6.32.45/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
37783@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
37784 return 0;
37785 }
37786
37787-static struct block_device_operations dst_blk_ops = {
37788+static const struct block_device_operations dst_blk_ops = {
37789 .open = dst_bdev_open,
37790 .release = dst_bdev_release,
37791 .owner = THIS_MODULE,
37792@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
37793 n->size = ctl->size;
37794
37795 atomic_set(&n->refcnt, 1);
37796- atomic_long_set(&n->gen, 0);
37797+ atomic_long_set_unchecked(&n->gen, 0);
37798 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
37799
37800 err = dst_node_sysfs_init(n);
37801diff -urNp linux-2.6.32.45/drivers/staging/dst/trans.c linux-2.6.32.45/drivers/staging/dst/trans.c
37802--- linux-2.6.32.45/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
37803+++ linux-2.6.32.45/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
37804@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
37805 t->error = 0;
37806 t->retries = 0;
37807 atomic_set(&t->refcnt, 1);
37808- t->gen = atomic_long_inc_return(&n->gen);
37809+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
37810
37811 t->enc = bio_data_dir(bio);
37812 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
37813diff -urNp linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c
37814--- linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
37815+++ linux-2.6.32.45/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
37816@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
37817 struct net_device_stats *stats = &etdev->net_stats;
37818
37819 if (pMpTcb->Flags & fMP_DEST_BROAD)
37820- atomic_inc(&etdev->Stats.brdcstxmt);
37821+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
37822 else if (pMpTcb->Flags & fMP_DEST_MULTI)
37823- atomic_inc(&etdev->Stats.multixmt);
37824+ atomic_inc_unchecked(&etdev->Stats.multixmt);
37825 else
37826- atomic_inc(&etdev->Stats.unixmt);
37827+ atomic_inc_unchecked(&etdev->Stats.unixmt);
37828
37829 if (pMpTcb->Packet) {
37830 stats->tx_bytes += pMpTcb->Packet->len;
37831diff -urNp linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h
37832--- linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
37833+++ linux-2.6.32.45/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
37834@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
37835 * operations
37836 */
37837 u32 unircv; /* # multicast packets received */
37838- atomic_t unixmt; /* # multicast packets for Tx */
37839+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
37840 u32 multircv; /* # multicast packets received */
37841- atomic_t multixmt; /* # multicast packets for Tx */
37842+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
37843 u32 brdcstrcv; /* # broadcast packets received */
37844- atomic_t brdcstxmt; /* # broadcast packets for Tx */
37845+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
37846 u32 norcvbuf; /* # Rx packets discarded */
37847 u32 noxmtbuf; /* # Tx packets discarded */
37848
37849diff -urNp linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c
37850--- linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
37851+++ linux-2.6.32.45/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
37852@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
37853 return 0;
37854 }
37855
37856-static struct vm_operations_struct go7007_vm_ops = {
37857+static const struct vm_operations_struct go7007_vm_ops = {
37858 .open = go7007_vm_open,
37859 .close = go7007_vm_close,
37860 .fault = go7007_vm_fault,
37861diff -urNp linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c
37862--- linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
37863+++ linux-2.6.32.45/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
37864@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
37865 /* The one and only one */
37866 static struct blkvsc_driver_context g_blkvsc_drv;
37867
37868-static struct block_device_operations block_ops = {
37869+static const struct block_device_operations block_ops = {
37870 .owner = THIS_MODULE,
37871 .open = blkvsc_open,
37872 .release = blkvsc_release,
37873diff -urNp linux-2.6.32.45/drivers/staging/hv/Channel.c linux-2.6.32.45/drivers/staging/hv/Channel.c
37874--- linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
37875+++ linux-2.6.32.45/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
37876@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
37877
37878 DPRINT_ENTER(VMBUS);
37879
37880- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
37881- atomic_inc(&gVmbusConnection.NextGpadlHandle);
37882+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
37883+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
37884
37885 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
37886 ASSERT(msgInfo != NULL);
37887diff -urNp linux-2.6.32.45/drivers/staging/hv/Hv.c linux-2.6.32.45/drivers/staging/hv/Hv.c
37888--- linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
37889+++ linux-2.6.32.45/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
37890@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
37891 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
37892 u32 outputAddressHi = outputAddress >> 32;
37893 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
37894- volatile void *hypercallPage = gHvContext.HypercallPage;
37895+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
37896
37897 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
37898 Control, Input, Output);
37899diff -urNp linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c
37900--- linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
37901+++ linux-2.6.32.45/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
37902@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
37903 to_device_context(root_device_obj);
37904 struct device_context *child_device_ctx =
37905 to_device_context(child_device_obj);
37906- static atomic_t device_num = ATOMIC_INIT(0);
37907+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37908
37909 DPRINT_ENTER(VMBUS_DRV);
37910
37911@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
37912
37913 /* Set the device name. Otherwise, device_register() will fail. */
37914 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
37915- atomic_inc_return(&device_num));
37916+ atomic_inc_return_unchecked(&device_num));
37917
37918 /* The new device belongs to this bus */
37919 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
37920diff -urNp linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h
37921--- linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
37922+++ linux-2.6.32.45/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
37923@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
37924 struct VMBUS_CONNECTION {
37925 enum VMBUS_CONNECT_STATE ConnectState;
37926
37927- atomic_t NextGpadlHandle;
37928+ atomic_unchecked_t NextGpadlHandle;
37929
37930 /*
37931 * Represents channel interrupts. Each bit position represents a
37932diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet.c linux-2.6.32.45/drivers/staging/octeon/ethernet.c
37933--- linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
37934+++ linux-2.6.32.45/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
37935@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
37936 * since the RX tasklet also increments it.
37937 */
37938 #ifdef CONFIG_64BIT
37939- atomic64_add(rx_status.dropped_packets,
37940- (atomic64_t *)&priv->stats.rx_dropped);
37941+ atomic64_add_unchecked(rx_status.dropped_packets,
37942+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37943 #else
37944- atomic_add(rx_status.dropped_packets,
37945- (atomic_t *)&priv->stats.rx_dropped);
37946+ atomic_add_unchecked(rx_status.dropped_packets,
37947+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37948 #endif
37949 }
37950
37951diff -urNp linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c
37952--- linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
37953+++ linux-2.6.32.45/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
37954@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
37955 /* Increment RX stats for virtual ports */
37956 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37957 #ifdef CONFIG_64BIT
37958- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37959- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37960+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37961+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37962 #else
37963- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37964- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37965+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37966+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37967 #endif
37968 }
37969 netif_receive_skb(skb);
37970@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
37971 dev->name);
37972 */
37973 #ifdef CONFIG_64BIT
37974- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37975+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
37976 #else
37977- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37978+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
37979 #endif
37980 dev_kfree_skb_irq(skb);
37981 }
37982diff -urNp linux-2.6.32.45/drivers/staging/panel/panel.c linux-2.6.32.45/drivers/staging/panel/panel.c
37983--- linux-2.6.32.45/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
37984+++ linux-2.6.32.45/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
37985@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
37986 return 0;
37987 }
37988
37989-static struct file_operations lcd_fops = {
37990+static const struct file_operations lcd_fops = {
37991 .write = lcd_write,
37992 .open = lcd_open,
37993 .release = lcd_release,
37994@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
37995 return 0;
37996 }
37997
37998-static struct file_operations keypad_fops = {
37999+static const struct file_operations keypad_fops = {
38000 .read = keypad_read, /* read */
38001 .open = keypad_open, /* open */
38002 .release = keypad_release, /* close */
38003diff -urNp linux-2.6.32.45/drivers/staging/phison/phison.c linux-2.6.32.45/drivers/staging/phison/phison.c
38004--- linux-2.6.32.45/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
38005+++ linux-2.6.32.45/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
38006@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
38007 ATA_BMDMA_SHT(DRV_NAME),
38008 };
38009
38010-static struct ata_port_operations phison_ops = {
38011+static const struct ata_port_operations phison_ops = {
38012 .inherits = &ata_bmdma_port_ops,
38013 .prereset = phison_pre_reset,
38014 };
38015diff -urNp linux-2.6.32.45/drivers/staging/poch/poch.c linux-2.6.32.45/drivers/staging/poch/poch.c
38016--- linux-2.6.32.45/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
38017+++ linux-2.6.32.45/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
38018@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
38019 return 0;
38020 }
38021
38022-static struct file_operations poch_fops = {
38023+static const struct file_operations poch_fops = {
38024 .owner = THIS_MODULE,
38025 .open = poch_open,
38026 .release = poch_release,
38027diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/inode.c linux-2.6.32.45/drivers/staging/pohmelfs/inode.c
38028--- linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38029+++ linux-2.6.32.45/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
38030@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
38031 mutex_init(&psb->mcache_lock);
38032 psb->mcache_root = RB_ROOT;
38033 psb->mcache_timeout = msecs_to_jiffies(5000);
38034- atomic_long_set(&psb->mcache_gen, 0);
38035+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
38036
38037 psb->trans_max_pages = 100;
38038
38039@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
38040 INIT_LIST_HEAD(&psb->crypto_ready_list);
38041 INIT_LIST_HEAD(&psb->crypto_active_list);
38042
38043- atomic_set(&psb->trans_gen, 1);
38044+ atomic_set_unchecked(&psb->trans_gen, 1);
38045 atomic_long_set(&psb->total_inodes, 0);
38046
38047 mutex_init(&psb->state_lock);
38048diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c
38049--- linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
38050+++ linux-2.6.32.45/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
38051@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
38052 m->data = data;
38053 m->start = start;
38054 m->size = size;
38055- m->gen = atomic_long_inc_return(&psb->mcache_gen);
38056+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
38057
38058 mutex_lock(&psb->mcache_lock);
38059 err = pohmelfs_mcache_insert(psb, m);
38060diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h
38061--- linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
38062+++ linux-2.6.32.45/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
38063@@ -570,14 +570,14 @@ struct pohmelfs_config;
38064 struct pohmelfs_sb {
38065 struct rb_root mcache_root;
38066 struct mutex mcache_lock;
38067- atomic_long_t mcache_gen;
38068+ atomic_long_unchecked_t mcache_gen;
38069 unsigned long mcache_timeout;
38070
38071 unsigned int idx;
38072
38073 unsigned int trans_retries;
38074
38075- atomic_t trans_gen;
38076+ atomic_unchecked_t trans_gen;
38077
38078 unsigned int crypto_attached_size;
38079 unsigned int crypto_align_size;
38080diff -urNp linux-2.6.32.45/drivers/staging/pohmelfs/trans.c linux-2.6.32.45/drivers/staging/pohmelfs/trans.c
38081--- linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
38082+++ linux-2.6.32.45/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
38083@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
38084 int err;
38085 struct netfs_cmd *cmd = t->iovec.iov_base;
38086
38087- t->gen = atomic_inc_return(&psb->trans_gen);
38088+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
38089
38090 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
38091 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
38092diff -urNp linux-2.6.32.45/drivers/staging/sep/sep_driver.c linux-2.6.32.45/drivers/staging/sep/sep_driver.c
38093--- linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
38094+++ linux-2.6.32.45/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
38095@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
38096 static dev_t sep_devno;
38097
38098 /* the files operations structure of the driver */
38099-static struct file_operations sep_file_operations = {
38100+static const struct file_operations sep_file_operations = {
38101 .owner = THIS_MODULE,
38102 .ioctl = sep_ioctl,
38103 .poll = sep_poll,
38104diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci.h linux-2.6.32.45/drivers/staging/usbip/vhci.h
38105--- linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
38106+++ linux-2.6.32.45/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
38107@@ -92,7 +92,7 @@ struct vhci_hcd {
38108 unsigned resuming:1;
38109 unsigned long re_timeout;
38110
38111- atomic_t seqnum;
38112+ atomic_unchecked_t seqnum;
38113
38114 /*
38115 * NOTE:
38116diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c
38117--- linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
38118+++ linux-2.6.32.45/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
38119@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
38120 return;
38121 }
38122
38123- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38124+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38125 if (priv->seqnum == 0xffff)
38126 usbip_uinfo("seqnum max\n");
38127
38128@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
38129 return -ENOMEM;
38130 }
38131
38132- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38133+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38134 if (unlink->seqnum == 0xffff)
38135 usbip_uinfo("seqnum max\n");
38136
38137@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
38138 vdev->rhport = rhport;
38139 }
38140
38141- atomic_set(&vhci->seqnum, 0);
38142+ atomic_set_unchecked(&vhci->seqnum, 0);
38143 spin_lock_init(&vhci->lock);
38144
38145
38146diff -urNp linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c
38147--- linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
38148+++ linux-2.6.32.45/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
38149@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
38150 usbip_uerr("cannot find a urb of seqnum %u\n",
38151 pdu->base.seqnum);
38152 usbip_uinfo("max seqnum %d\n",
38153- atomic_read(&the_controller->seqnum));
38154+ atomic_read_unchecked(&the_controller->seqnum));
38155 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38156 return;
38157 }
38158diff -urNp linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c
38159--- linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
38160+++ linux-2.6.32.45/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
38161@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38162 static int __init vme_user_probe(struct device *, int, int);
38163 static int __exit vme_user_remove(struct device *, int, int);
38164
38165-static struct file_operations vme_user_fops = {
38166+static const struct file_operations vme_user_fops = {
38167 .open = vme_user_open,
38168 .release = vme_user_release,
38169 .read = vme_user_read,
38170diff -urNp linux-2.6.32.45/drivers/telephony/ixj.c linux-2.6.32.45/drivers/telephony/ixj.c
38171--- linux-2.6.32.45/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
38172+++ linux-2.6.32.45/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
38173@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38174 bool mContinue;
38175 char *pIn, *pOut;
38176
38177+ pax_track_stack();
38178+
38179 if (!SCI_Prepare(j))
38180 return 0;
38181
38182diff -urNp linux-2.6.32.45/drivers/uio/uio.c linux-2.6.32.45/drivers/uio/uio.c
38183--- linux-2.6.32.45/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
38184+++ linux-2.6.32.45/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
38185@@ -23,6 +23,7 @@
38186 #include <linux/string.h>
38187 #include <linux/kobject.h>
38188 #include <linux/uio_driver.h>
38189+#include <asm/local.h>
38190
38191 #define UIO_MAX_DEVICES 255
38192
38193@@ -30,10 +31,10 @@ struct uio_device {
38194 struct module *owner;
38195 struct device *dev;
38196 int minor;
38197- atomic_t event;
38198+ atomic_unchecked_t event;
38199 struct fasync_struct *async_queue;
38200 wait_queue_head_t wait;
38201- int vma_count;
38202+ local_t vma_count;
38203 struct uio_info *info;
38204 struct kobject *map_dir;
38205 struct kobject *portio_dir;
38206@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38207 return entry->show(mem, buf);
38208 }
38209
38210-static struct sysfs_ops map_sysfs_ops = {
38211+static const struct sysfs_ops map_sysfs_ops = {
38212 .show = map_type_show,
38213 };
38214
38215@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38216 return entry->show(port, buf);
38217 }
38218
38219-static struct sysfs_ops portio_sysfs_ops = {
38220+static const struct sysfs_ops portio_sysfs_ops = {
38221 .show = portio_type_show,
38222 };
38223
38224@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38225 struct uio_device *idev = dev_get_drvdata(dev);
38226 if (idev)
38227 return sprintf(buf, "%u\n",
38228- (unsigned int)atomic_read(&idev->event));
38229+ (unsigned int)atomic_read_unchecked(&idev->event));
38230 else
38231 return -ENODEV;
38232 }
38233@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38234 {
38235 struct uio_device *idev = info->uio_dev;
38236
38237- atomic_inc(&idev->event);
38238+ atomic_inc_unchecked(&idev->event);
38239 wake_up_interruptible(&idev->wait);
38240 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38241 }
38242@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38243 }
38244
38245 listener->dev = idev;
38246- listener->event_count = atomic_read(&idev->event);
38247+ listener->event_count = atomic_read_unchecked(&idev->event);
38248 filep->private_data = listener;
38249
38250 if (idev->info->open) {
38251@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38252 return -EIO;
38253
38254 poll_wait(filep, &idev->wait, wait);
38255- if (listener->event_count != atomic_read(&idev->event))
38256+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38257 return POLLIN | POLLRDNORM;
38258 return 0;
38259 }
38260@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38261 do {
38262 set_current_state(TASK_INTERRUPTIBLE);
38263
38264- event_count = atomic_read(&idev->event);
38265+ event_count = atomic_read_unchecked(&idev->event);
38266 if (event_count != listener->event_count) {
38267 if (copy_to_user(buf, &event_count, count))
38268 retval = -EFAULT;
38269@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38270 static void uio_vma_open(struct vm_area_struct *vma)
38271 {
38272 struct uio_device *idev = vma->vm_private_data;
38273- idev->vma_count++;
38274+ local_inc(&idev->vma_count);
38275 }
38276
38277 static void uio_vma_close(struct vm_area_struct *vma)
38278 {
38279 struct uio_device *idev = vma->vm_private_data;
38280- idev->vma_count--;
38281+ local_dec(&idev->vma_count);
38282 }
38283
38284 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38285@@ -840,7 +841,7 @@ int __uio_register_device(struct module
38286 idev->owner = owner;
38287 idev->info = info;
38288 init_waitqueue_head(&idev->wait);
38289- atomic_set(&idev->event, 0);
38290+ atomic_set_unchecked(&idev->event, 0);
38291
38292 ret = uio_get_minor(idev);
38293 if (ret)
38294diff -urNp linux-2.6.32.45/drivers/usb/atm/usbatm.c linux-2.6.32.45/drivers/usb/atm/usbatm.c
38295--- linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38296+++ linux-2.6.32.45/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38297@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38298 if (printk_ratelimit())
38299 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38300 __func__, vpi, vci);
38301- atomic_inc(&vcc->stats->rx_err);
38302+ atomic_inc_unchecked(&vcc->stats->rx_err);
38303 return;
38304 }
38305
38306@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38307 if (length > ATM_MAX_AAL5_PDU) {
38308 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38309 __func__, length, vcc);
38310- atomic_inc(&vcc->stats->rx_err);
38311+ atomic_inc_unchecked(&vcc->stats->rx_err);
38312 goto out;
38313 }
38314
38315@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38316 if (sarb->len < pdu_length) {
38317 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38318 __func__, pdu_length, sarb->len, vcc);
38319- atomic_inc(&vcc->stats->rx_err);
38320+ atomic_inc_unchecked(&vcc->stats->rx_err);
38321 goto out;
38322 }
38323
38324 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38325 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38326 __func__, vcc);
38327- atomic_inc(&vcc->stats->rx_err);
38328+ atomic_inc_unchecked(&vcc->stats->rx_err);
38329 goto out;
38330 }
38331
38332@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38333 if (printk_ratelimit())
38334 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38335 __func__, length);
38336- atomic_inc(&vcc->stats->rx_drop);
38337+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38338 goto out;
38339 }
38340
38341@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38342
38343 vcc->push(vcc, skb);
38344
38345- atomic_inc(&vcc->stats->rx);
38346+ atomic_inc_unchecked(&vcc->stats->rx);
38347 out:
38348 skb_trim(sarb, 0);
38349 }
38350@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38351 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38352
38353 usbatm_pop(vcc, skb);
38354- atomic_inc(&vcc->stats->tx);
38355+ atomic_inc_unchecked(&vcc->stats->tx);
38356
38357 skb = skb_dequeue(&instance->sndqueue);
38358 }
38359@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38360 if (!left--)
38361 return sprintf(page,
38362 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38363- atomic_read(&atm_dev->stats.aal5.tx),
38364- atomic_read(&atm_dev->stats.aal5.tx_err),
38365- atomic_read(&atm_dev->stats.aal5.rx),
38366- atomic_read(&atm_dev->stats.aal5.rx_err),
38367- atomic_read(&atm_dev->stats.aal5.rx_drop));
38368+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38369+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38370+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38371+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38372+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38373
38374 if (!left--) {
38375 if (instance->disconnected)
38376diff -urNp linux-2.6.32.45/drivers/usb/class/cdc-wdm.c linux-2.6.32.45/drivers/usb/class/cdc-wdm.c
38377--- linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38378+++ linux-2.6.32.45/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38379@@ -314,7 +314,7 @@ static ssize_t wdm_write
38380 if (r < 0)
38381 goto outnp;
38382
38383- if (!file->f_flags && O_NONBLOCK)
38384+ if (!(file->f_flags & O_NONBLOCK))
38385 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38386 &desc->flags));
38387 else
38388diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.c linux-2.6.32.45/drivers/usb/core/hcd.c
38389--- linux-2.6.32.45/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38390+++ linux-2.6.32.45/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38391@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38392
38393 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38394
38395-struct usb_mon_operations *mon_ops;
38396+const struct usb_mon_operations *mon_ops;
38397
38398 /*
38399 * The registration is unlocked.
38400@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38401 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38402 */
38403
38404-int usb_mon_register (struct usb_mon_operations *ops)
38405+int usb_mon_register (const struct usb_mon_operations *ops)
38406 {
38407
38408 if (mon_ops)
38409diff -urNp linux-2.6.32.45/drivers/usb/core/hcd.h linux-2.6.32.45/drivers/usb/core/hcd.h
38410--- linux-2.6.32.45/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38411+++ linux-2.6.32.45/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38412@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38413 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38414
38415 struct usb_mon_operations {
38416- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38417- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38418- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38419+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38420+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38421+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38422 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38423 };
38424
38425-extern struct usb_mon_operations *mon_ops;
38426+extern const struct usb_mon_operations *mon_ops;
38427
38428 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38429 {
38430@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38431 (*mon_ops->urb_complete)(bus, urb, status);
38432 }
38433
38434-int usb_mon_register(struct usb_mon_operations *ops);
38435+int usb_mon_register(const struct usb_mon_operations *ops);
38436 void usb_mon_deregister(void);
38437
38438 #else
38439diff -urNp linux-2.6.32.45/drivers/usb/core/message.c linux-2.6.32.45/drivers/usb/core/message.c
38440--- linux-2.6.32.45/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38441+++ linux-2.6.32.45/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38442@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38443 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38444 if (buf) {
38445 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38446- if (len > 0) {
38447- smallbuf = kmalloc(++len, GFP_NOIO);
38448+ if (len++ > 0) {
38449+ smallbuf = kmalloc(len, GFP_NOIO);
38450 if (!smallbuf)
38451 return buf;
38452 memcpy(smallbuf, buf, len);
38453diff -urNp linux-2.6.32.45/drivers/usb/misc/appledisplay.c linux-2.6.32.45/drivers/usb/misc/appledisplay.c
38454--- linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38455+++ linux-2.6.32.45/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38456@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38457 return pdata->msgdata[1];
38458 }
38459
38460-static struct backlight_ops appledisplay_bl_data = {
38461+static const struct backlight_ops appledisplay_bl_data = {
38462 .get_brightness = appledisplay_bl_get_brightness,
38463 .update_status = appledisplay_bl_update_status,
38464 };
38465diff -urNp linux-2.6.32.45/drivers/usb/mon/mon_main.c linux-2.6.32.45/drivers/usb/mon/mon_main.c
38466--- linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38467+++ linux-2.6.32.45/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38468@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38469 /*
38470 * Ops
38471 */
38472-static struct usb_mon_operations mon_ops_0 = {
38473+static const struct usb_mon_operations mon_ops_0 = {
38474 .urb_submit = mon_submit,
38475 .urb_submit_error = mon_submit_error,
38476 .urb_complete = mon_complete,
38477diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h
38478--- linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38479+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38480@@ -192,7 +192,7 @@ struct wahc {
38481 struct list_head xfer_delayed_list;
38482 spinlock_t xfer_list_lock;
38483 struct work_struct xfer_work;
38484- atomic_t xfer_id_count;
38485+ atomic_unchecked_t xfer_id_count;
38486 };
38487
38488
38489@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38490 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38491 spin_lock_init(&wa->xfer_list_lock);
38492 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38493- atomic_set(&wa->xfer_id_count, 1);
38494+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38495 }
38496
38497 /**
38498diff -urNp linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c
38499--- linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38500+++ linux-2.6.32.45/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38501@@ -293,7 +293,7 @@ out:
38502 */
38503 static void wa_xfer_id_init(struct wa_xfer *xfer)
38504 {
38505- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38506+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38507 }
38508
38509 /*
38510diff -urNp linux-2.6.32.45/drivers/uwb/wlp/messages.c linux-2.6.32.45/drivers/uwb/wlp/messages.c
38511--- linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38512+++ linux-2.6.32.45/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38513@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38514 size_t len = skb->len;
38515 size_t used;
38516 ssize_t result;
38517- struct wlp_nonce enonce, rnonce;
38518+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38519 enum wlp_assc_error assc_err;
38520 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38521 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38522diff -urNp linux-2.6.32.45/drivers/uwb/wlp/sysfs.c linux-2.6.32.45/drivers/uwb/wlp/sysfs.c
38523--- linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38524+++ linux-2.6.32.45/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38525@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38526 return ret;
38527 }
38528
38529-static
38530-struct sysfs_ops wss_sysfs_ops = {
38531+static const struct sysfs_ops wss_sysfs_ops = {
38532 .show = wlp_wss_attr_show,
38533 .store = wlp_wss_attr_store,
38534 };
38535diff -urNp linux-2.6.32.45/drivers/video/atmel_lcdfb.c linux-2.6.32.45/drivers/video/atmel_lcdfb.c
38536--- linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38537+++ linux-2.6.32.45/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38538@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38539 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38540 }
38541
38542-static struct backlight_ops atmel_lcdc_bl_ops = {
38543+static const struct backlight_ops atmel_lcdc_bl_ops = {
38544 .update_status = atmel_bl_update_status,
38545 .get_brightness = atmel_bl_get_brightness,
38546 };
38547diff -urNp linux-2.6.32.45/drivers/video/aty/aty128fb.c linux-2.6.32.45/drivers/video/aty/aty128fb.c
38548--- linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38549+++ linux-2.6.32.45/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38550@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38551 return bd->props.brightness;
38552 }
38553
38554-static struct backlight_ops aty128_bl_data = {
38555+static const struct backlight_ops aty128_bl_data = {
38556 .get_brightness = aty128_bl_get_brightness,
38557 .update_status = aty128_bl_update_status,
38558 };
38559diff -urNp linux-2.6.32.45/drivers/video/aty/atyfb_base.c linux-2.6.32.45/drivers/video/aty/atyfb_base.c
38560--- linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38561+++ linux-2.6.32.45/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38562@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38563 return bd->props.brightness;
38564 }
38565
38566-static struct backlight_ops aty_bl_data = {
38567+static const struct backlight_ops aty_bl_data = {
38568 .get_brightness = aty_bl_get_brightness,
38569 .update_status = aty_bl_update_status,
38570 };
38571diff -urNp linux-2.6.32.45/drivers/video/aty/radeon_backlight.c linux-2.6.32.45/drivers/video/aty/radeon_backlight.c
38572--- linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38573+++ linux-2.6.32.45/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38574@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38575 return bd->props.brightness;
38576 }
38577
38578-static struct backlight_ops radeon_bl_data = {
38579+static const struct backlight_ops radeon_bl_data = {
38580 .get_brightness = radeon_bl_get_brightness,
38581 .update_status = radeon_bl_update_status,
38582 };
38583diff -urNp linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c
38584--- linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38585+++ linux-2.6.32.45/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38586@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38587 return error ? data->current_brightness : reg_val;
38588 }
38589
38590-static struct backlight_ops adp5520_bl_ops = {
38591+static const struct backlight_ops adp5520_bl_ops = {
38592 .update_status = adp5520_bl_update_status,
38593 .get_brightness = adp5520_bl_get_brightness,
38594 };
38595diff -urNp linux-2.6.32.45/drivers/video/backlight/adx_bl.c linux-2.6.32.45/drivers/video/backlight/adx_bl.c
38596--- linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38597+++ linux-2.6.32.45/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38598@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38599 return 1;
38600 }
38601
38602-static struct backlight_ops adx_backlight_ops = {
38603+static const struct backlight_ops adx_backlight_ops = {
38604 .options = 0,
38605 .update_status = adx_backlight_update_status,
38606 .get_brightness = adx_backlight_get_brightness,
38607diff -urNp linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c
38608--- linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38609+++ linux-2.6.32.45/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38610@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38611 return pwm_channel_enable(&pwmbl->pwmc);
38612 }
38613
38614-static struct backlight_ops atmel_pwm_bl_ops = {
38615+static const struct backlight_ops atmel_pwm_bl_ops = {
38616 .get_brightness = atmel_pwm_bl_get_intensity,
38617 .update_status = atmel_pwm_bl_set_intensity,
38618 };
38619diff -urNp linux-2.6.32.45/drivers/video/backlight/backlight.c linux-2.6.32.45/drivers/video/backlight/backlight.c
38620--- linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38621+++ linux-2.6.32.45/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38622@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38623 * ERR_PTR() or a pointer to the newly allocated device.
38624 */
38625 struct backlight_device *backlight_device_register(const char *name,
38626- struct device *parent, void *devdata, struct backlight_ops *ops)
38627+ struct device *parent, void *devdata, const struct backlight_ops *ops)
38628 {
38629 struct backlight_device *new_bd;
38630 int rc;
38631diff -urNp linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c
38632--- linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38633+++ linux-2.6.32.45/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38634@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38635 }
38636 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38637
38638-static struct backlight_ops corgi_bl_ops = {
38639+static const struct backlight_ops corgi_bl_ops = {
38640 .get_brightness = corgi_bl_get_intensity,
38641 .update_status = corgi_bl_update_status,
38642 };
38643diff -urNp linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c
38644--- linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38645+++ linux-2.6.32.45/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38646@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38647 return intensity;
38648 }
38649
38650-static struct backlight_ops cr_backlight_ops = {
38651+static const struct backlight_ops cr_backlight_ops = {
38652 .get_brightness = cr_backlight_get_intensity,
38653 .update_status = cr_backlight_set_intensity,
38654 };
38655diff -urNp linux-2.6.32.45/drivers/video/backlight/da903x_bl.c linux-2.6.32.45/drivers/video/backlight/da903x_bl.c
38656--- linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38657+++ linux-2.6.32.45/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38658@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38659 return data->current_brightness;
38660 }
38661
38662-static struct backlight_ops da903x_backlight_ops = {
38663+static const struct backlight_ops da903x_backlight_ops = {
38664 .update_status = da903x_backlight_update_status,
38665 .get_brightness = da903x_backlight_get_brightness,
38666 };
38667diff -urNp linux-2.6.32.45/drivers/video/backlight/generic_bl.c linux-2.6.32.45/drivers/video/backlight/generic_bl.c
38668--- linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38669+++ linux-2.6.32.45/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38670@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
38671 }
38672 EXPORT_SYMBOL(corgibl_limit_intensity);
38673
38674-static struct backlight_ops genericbl_ops = {
38675+static const struct backlight_ops genericbl_ops = {
38676 .options = BL_CORE_SUSPENDRESUME,
38677 .get_brightness = genericbl_get_intensity,
38678 .update_status = genericbl_send_intensity,
38679diff -urNp linux-2.6.32.45/drivers/video/backlight/hp680_bl.c linux-2.6.32.45/drivers/video/backlight/hp680_bl.c
38680--- linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
38681+++ linux-2.6.32.45/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
38682@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
38683 return current_intensity;
38684 }
38685
38686-static struct backlight_ops hp680bl_ops = {
38687+static const struct backlight_ops hp680bl_ops = {
38688 .get_brightness = hp680bl_get_intensity,
38689 .update_status = hp680bl_set_intensity,
38690 };
38691diff -urNp linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c
38692--- linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
38693+++ linux-2.6.32.45/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
38694@@ -93,7 +93,7 @@ out:
38695 return ret;
38696 }
38697
38698-static struct backlight_ops jornada_bl_ops = {
38699+static const struct backlight_ops jornada_bl_ops = {
38700 .get_brightness = jornada_bl_get_brightness,
38701 .update_status = jornada_bl_update_status,
38702 .options = BL_CORE_SUSPENDRESUME,
38703diff -urNp linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c
38704--- linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
38705+++ linux-2.6.32.45/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
38706@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
38707 return kb3886bl_intensity;
38708 }
38709
38710-static struct backlight_ops kb3886bl_ops = {
38711+static const struct backlight_ops kb3886bl_ops = {
38712 .get_brightness = kb3886bl_get_intensity,
38713 .update_status = kb3886bl_send_intensity,
38714 };
38715diff -urNp linux-2.6.32.45/drivers/video/backlight/locomolcd.c linux-2.6.32.45/drivers/video/backlight/locomolcd.c
38716--- linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
38717+++ linux-2.6.32.45/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
38718@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
38719 return current_intensity;
38720 }
38721
38722-static struct backlight_ops locomobl_data = {
38723+static const struct backlight_ops locomobl_data = {
38724 .get_brightness = locomolcd_get_intensity,
38725 .update_status = locomolcd_set_intensity,
38726 };
38727diff -urNp linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c
38728--- linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
38729+++ linux-2.6.32.45/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
38730@@ -33,7 +33,7 @@ struct dmi_match_data {
38731 unsigned long iostart;
38732 unsigned long iolen;
38733 /* Backlight operations structure. */
38734- struct backlight_ops backlight_ops;
38735+ const struct backlight_ops backlight_ops;
38736 };
38737
38738 /* Module parameters. */
38739diff -urNp linux-2.6.32.45/drivers/video/backlight/omap1_bl.c linux-2.6.32.45/drivers/video/backlight/omap1_bl.c
38740--- linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
38741+++ linux-2.6.32.45/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
38742@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
38743 return bl->current_intensity;
38744 }
38745
38746-static struct backlight_ops omapbl_ops = {
38747+static const struct backlight_ops omapbl_ops = {
38748 .get_brightness = omapbl_get_intensity,
38749 .update_status = omapbl_update_status,
38750 };
38751diff -urNp linux-2.6.32.45/drivers/video/backlight/progear_bl.c linux-2.6.32.45/drivers/video/backlight/progear_bl.c
38752--- linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
38753+++ linux-2.6.32.45/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
38754@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
38755 return intensity - HW_LEVEL_MIN;
38756 }
38757
38758-static struct backlight_ops progearbl_ops = {
38759+static const struct backlight_ops progearbl_ops = {
38760 .get_brightness = progearbl_get_intensity,
38761 .update_status = progearbl_set_intensity,
38762 };
38763diff -urNp linux-2.6.32.45/drivers/video/backlight/pwm_bl.c linux-2.6.32.45/drivers/video/backlight/pwm_bl.c
38764--- linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
38765+++ linux-2.6.32.45/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
38766@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
38767 return bl->props.brightness;
38768 }
38769
38770-static struct backlight_ops pwm_backlight_ops = {
38771+static const struct backlight_ops pwm_backlight_ops = {
38772 .update_status = pwm_backlight_update_status,
38773 .get_brightness = pwm_backlight_get_brightness,
38774 };
38775diff -urNp linux-2.6.32.45/drivers/video/backlight/tosa_bl.c linux-2.6.32.45/drivers/video/backlight/tosa_bl.c
38776--- linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
38777+++ linux-2.6.32.45/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
38778@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
38779 return props->brightness;
38780 }
38781
38782-static struct backlight_ops bl_ops = {
38783+static const struct backlight_ops bl_ops = {
38784 .get_brightness = tosa_bl_get_brightness,
38785 .update_status = tosa_bl_update_status,
38786 };
38787diff -urNp linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c
38788--- linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
38789+++ linux-2.6.32.45/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
38790@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
38791 return data->current_brightness;
38792 }
38793
38794-static struct backlight_ops wm831x_backlight_ops = {
38795+static const struct backlight_ops wm831x_backlight_ops = {
38796 .options = BL_CORE_SUSPENDRESUME,
38797 .update_status = wm831x_backlight_update_status,
38798 .get_brightness = wm831x_backlight_get_brightness,
38799diff -urNp linux-2.6.32.45/drivers/video/bf54x-lq043fb.c linux-2.6.32.45/drivers/video/bf54x-lq043fb.c
38800--- linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
38801+++ linux-2.6.32.45/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
38802@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
38803 return 0;
38804 }
38805
38806-static struct backlight_ops bfin_lq043fb_bl_ops = {
38807+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38808 .get_brightness = bl_get_brightness,
38809 };
38810
38811diff -urNp linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c
38812--- linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
38813+++ linux-2.6.32.45/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
38814@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
38815 return 0;
38816 }
38817
38818-static struct backlight_ops bfin_lq043fb_bl_ops = {
38819+static const struct backlight_ops bfin_lq043fb_bl_ops = {
38820 .get_brightness = bl_get_brightness,
38821 };
38822
38823diff -urNp linux-2.6.32.45/drivers/video/fbcmap.c linux-2.6.32.45/drivers/video/fbcmap.c
38824--- linux-2.6.32.45/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
38825+++ linux-2.6.32.45/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
38826@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
38827 rc = -ENODEV;
38828 goto out;
38829 }
38830- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38831- !info->fbops->fb_setcmap)) {
38832+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38833 rc = -EINVAL;
38834 goto out1;
38835 }
38836diff -urNp linux-2.6.32.45/drivers/video/fbmem.c linux-2.6.32.45/drivers/video/fbmem.c
38837--- linux-2.6.32.45/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
38838+++ linux-2.6.32.45/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
38839@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
38840 image->dx += image->width + 8;
38841 }
38842 } else if (rotate == FB_ROTATE_UD) {
38843- for (x = 0; x < num && image->dx >= 0; x++) {
38844+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38845 info->fbops->fb_imageblit(info, image);
38846 image->dx -= image->width + 8;
38847 }
38848@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
38849 image->dy += image->height + 8;
38850 }
38851 } else if (rotate == FB_ROTATE_CCW) {
38852- for (x = 0; x < num && image->dy >= 0; x++) {
38853+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38854 info->fbops->fb_imageblit(info, image);
38855 image->dy -= image->height + 8;
38856 }
38857@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
38858 int flags = info->flags;
38859 int ret = 0;
38860
38861+ pax_track_stack();
38862+
38863 if (var->activate & FB_ACTIVATE_INV_MODE) {
38864 struct fb_videomode mode1, mode2;
38865
38866@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
38867 void __user *argp = (void __user *)arg;
38868 long ret = 0;
38869
38870+ pax_track_stack();
38871+
38872 switch (cmd) {
38873 case FBIOGET_VSCREENINFO:
38874 if (!lock_fb_info(info))
38875@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
38876 return -EFAULT;
38877 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38878 return -EINVAL;
38879- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38880+ if (con2fb.framebuffer >= FB_MAX)
38881 return -EINVAL;
38882 if (!registered_fb[con2fb.framebuffer])
38883 request_module("fb%d", con2fb.framebuffer);
38884diff -urNp linux-2.6.32.45/drivers/video/i810/i810_accel.c linux-2.6.32.45/drivers/video/i810/i810_accel.c
38885--- linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
38886+++ linux-2.6.32.45/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
38887@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
38888 }
38889 }
38890 printk("ringbuffer lockup!!!\n");
38891+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38892 i810_report_error(mmio);
38893 par->dev_flags |= LOCKUP;
38894 info->pixmap.scan_align = 1;
38895diff -urNp linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c
38896--- linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
38897+++ linux-2.6.32.45/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
38898@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
38899 return bd->props.brightness;
38900 }
38901
38902-static struct backlight_ops nvidia_bl_ops = {
38903+static const struct backlight_ops nvidia_bl_ops = {
38904 .get_brightness = nvidia_bl_get_brightness,
38905 .update_status = nvidia_bl_update_status,
38906 };
38907diff -urNp linux-2.6.32.45/drivers/video/riva/fbdev.c linux-2.6.32.45/drivers/video/riva/fbdev.c
38908--- linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
38909+++ linux-2.6.32.45/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
38910@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
38911 return bd->props.brightness;
38912 }
38913
38914-static struct backlight_ops riva_bl_ops = {
38915+static const struct backlight_ops riva_bl_ops = {
38916 .get_brightness = riva_bl_get_brightness,
38917 .update_status = riva_bl_update_status,
38918 };
38919diff -urNp linux-2.6.32.45/drivers/video/uvesafb.c linux-2.6.32.45/drivers/video/uvesafb.c
38920--- linux-2.6.32.45/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
38921+++ linux-2.6.32.45/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
38922@@ -18,6 +18,7 @@
38923 #include <linux/fb.h>
38924 #include <linux/io.h>
38925 #include <linux/mutex.h>
38926+#include <linux/moduleloader.h>
38927 #include <video/edid.h>
38928 #include <video/uvesafb.h>
38929 #ifdef CONFIG_X86
38930@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
38931 NULL,
38932 };
38933
38934- return call_usermodehelper(v86d_path, argv, envp, 1);
38935+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
38936 }
38937
38938 /*
38939@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
38940 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
38941 par->pmi_setpal = par->ypan = 0;
38942 } else {
38943+
38944+#ifdef CONFIG_PAX_KERNEXEC
38945+#ifdef CONFIG_MODULES
38946+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
38947+#endif
38948+ if (!par->pmi_code) {
38949+ par->pmi_setpal = par->ypan = 0;
38950+ return 0;
38951+ }
38952+#endif
38953+
38954 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
38955 + task->t.regs.edi);
38956+
38957+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38958+ pax_open_kernel();
38959+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
38960+ pax_close_kernel();
38961+
38962+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
38963+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
38964+#else
38965 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
38966 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
38967+#endif
38968+
38969 printk(KERN_INFO "uvesafb: protected mode interface info at "
38970 "%04x:%04x\n",
38971 (u16)task->t.regs.es, (u16)task->t.regs.edi);
38972@@ -1799,6 +1822,11 @@ out:
38973 if (par->vbe_modes)
38974 kfree(par->vbe_modes);
38975
38976+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38977+ if (par->pmi_code)
38978+ module_free_exec(NULL, par->pmi_code);
38979+#endif
38980+
38981 framebuffer_release(info);
38982 return err;
38983 }
38984@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
38985 kfree(par->vbe_state_orig);
38986 if (par->vbe_state_saved)
38987 kfree(par->vbe_state_saved);
38988+
38989+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
38990+ if (par->pmi_code)
38991+ module_free_exec(NULL, par->pmi_code);
38992+#endif
38993+
38994 }
38995
38996 framebuffer_release(info);
38997diff -urNp linux-2.6.32.45/drivers/video/vesafb.c linux-2.6.32.45/drivers/video/vesafb.c
38998--- linux-2.6.32.45/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
38999+++ linux-2.6.32.45/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
39000@@ -9,6 +9,7 @@
39001 */
39002
39003 #include <linux/module.h>
39004+#include <linux/moduleloader.h>
39005 #include <linux/kernel.h>
39006 #include <linux/errno.h>
39007 #include <linux/string.h>
39008@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
39009 static int vram_total __initdata; /* Set total amount of memory */
39010 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39011 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39012-static void (*pmi_start)(void) __read_mostly;
39013-static void (*pmi_pal) (void) __read_mostly;
39014+static void (*pmi_start)(void) __read_only;
39015+static void (*pmi_pal) (void) __read_only;
39016 static int depth __read_mostly;
39017 static int vga_compat __read_mostly;
39018 /* --------------------------------------------------------------------- */
39019@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39020 unsigned int size_vmode;
39021 unsigned int size_remap;
39022 unsigned int size_total;
39023+ void *pmi_code = NULL;
39024
39025 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39026 return -ENODEV;
39027@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39028 size_remap = size_total;
39029 vesafb_fix.smem_len = size_remap;
39030
39031-#ifndef __i386__
39032- screen_info.vesapm_seg = 0;
39033-#endif
39034-
39035 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39036 printk(KERN_WARNING
39037 "vesafb: cannot reserve video memory at 0x%lx\n",
39038@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
39039 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39040 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39041
39042+#ifdef __i386__
39043+
39044+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39045+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39046+ if (!pmi_code)
39047+#elif !defined(CONFIG_PAX_KERNEXEC)
39048+ if (0)
39049+#endif
39050+
39051+#endif
39052+ screen_info.vesapm_seg = 0;
39053+
39054 if (screen_info.vesapm_seg) {
39055- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39056- screen_info.vesapm_seg,screen_info.vesapm_off);
39057+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39058+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39059 }
39060
39061 if (screen_info.vesapm_seg < 0xc000)
39062@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
39063
39064 if (ypan || pmi_setpal) {
39065 unsigned short *pmi_base;
39066+
39067 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39068- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39069- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39070+
39071+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39072+ pax_open_kernel();
39073+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39074+#else
39075+ pmi_code = pmi_base;
39076+#endif
39077+
39078+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39079+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39080+
39081+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39082+ pmi_start = ktva_ktla(pmi_start);
39083+ pmi_pal = ktva_ktla(pmi_pal);
39084+ pax_close_kernel();
39085+#endif
39086+
39087 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39088 if (pmi_base[3]) {
39089 printk(KERN_INFO "vesafb: pmi: ports = ");
39090@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
39091 info->node, info->fix.id);
39092 return 0;
39093 err:
39094+
39095+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39096+ module_free_exec(NULL, pmi_code);
39097+#endif
39098+
39099 if (info->screen_base)
39100 iounmap(info->screen_base);
39101 framebuffer_release(info);
39102diff -urNp linux-2.6.32.45/drivers/xen/sys-hypervisor.c linux-2.6.32.45/drivers/xen/sys-hypervisor.c
39103--- linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
39104+++ linux-2.6.32.45/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
39105@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
39106 return 0;
39107 }
39108
39109-static struct sysfs_ops hyp_sysfs_ops = {
39110+static const struct sysfs_ops hyp_sysfs_ops = {
39111 .show = hyp_sysfs_show,
39112 .store = hyp_sysfs_store,
39113 };
39114diff -urNp linux-2.6.32.45/fs/9p/vfs_inode.c linux-2.6.32.45/fs/9p/vfs_inode.c
39115--- linux-2.6.32.45/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
39116+++ linux-2.6.32.45/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
39117@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
39118 static void
39119 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39120 {
39121- char *s = nd_get_link(nd);
39122+ const char *s = nd_get_link(nd);
39123
39124 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39125 IS_ERR(s) ? "<error>" : s);
39126diff -urNp linux-2.6.32.45/fs/aio.c linux-2.6.32.45/fs/aio.c
39127--- linux-2.6.32.45/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
39128+++ linux-2.6.32.45/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
39129@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
39130 size += sizeof(struct io_event) * nr_events;
39131 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39132
39133- if (nr_pages < 0)
39134+ if (nr_pages <= 0)
39135 return -EINVAL;
39136
39137 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39138@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
39139 struct aio_timeout to;
39140 int retry = 0;
39141
39142+ pax_track_stack();
39143+
39144 /* needed to zero any padding within an entry (there shouldn't be
39145 * any, but C is fun!
39146 */
39147@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
39148 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
39149 {
39150 ssize_t ret;
39151+ struct iovec iovstack;
39152
39153 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
39154 kiocb->ki_nbytes, 1,
39155- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
39156+ &iovstack, &kiocb->ki_iovec);
39157 if (ret < 0)
39158 goto out;
39159
39160+ if (kiocb->ki_iovec == &iovstack) {
39161+ kiocb->ki_inline_vec = iovstack;
39162+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39163+ }
39164 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39165 kiocb->ki_cur_seg = 0;
39166 /* ki_nbytes/left now reflect bytes instead of segs */
39167diff -urNp linux-2.6.32.45/fs/attr.c linux-2.6.32.45/fs/attr.c
39168--- linux-2.6.32.45/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
39169+++ linux-2.6.32.45/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
39170@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
39171 unsigned long limit;
39172
39173 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
39174+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39175 if (limit != RLIM_INFINITY && offset > limit)
39176 goto out_sig;
39177 if (offset > inode->i_sb->s_maxbytes)
39178diff -urNp linux-2.6.32.45/fs/autofs/root.c linux-2.6.32.45/fs/autofs/root.c
39179--- linux-2.6.32.45/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
39180+++ linux-2.6.32.45/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
39181@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
39182 set_bit(n,sbi->symlink_bitmap);
39183 sl = &sbi->symlink[n];
39184 sl->len = strlen(symname);
39185- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
39186+ slsize = sl->len+1;
39187+ sl->data = kmalloc(slsize, GFP_KERNEL);
39188 if (!sl->data) {
39189 clear_bit(n,sbi->symlink_bitmap);
39190 unlock_kernel();
39191diff -urNp linux-2.6.32.45/fs/autofs4/symlink.c linux-2.6.32.45/fs/autofs4/symlink.c
39192--- linux-2.6.32.45/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
39193+++ linux-2.6.32.45/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
39194@@ -15,7 +15,7 @@
39195 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39196 {
39197 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39198- nd_set_link(nd, (char *)ino->u.symlink);
39199+ nd_set_link(nd, ino->u.symlink);
39200 return NULL;
39201 }
39202
39203diff -urNp linux-2.6.32.45/fs/befs/linuxvfs.c linux-2.6.32.45/fs/befs/linuxvfs.c
39204--- linux-2.6.32.45/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39205+++ linux-2.6.32.45/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39206@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39207 {
39208 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39209 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39210- char *link = nd_get_link(nd);
39211+ const char *link = nd_get_link(nd);
39212 if (!IS_ERR(link))
39213 kfree(link);
39214 }
39215diff -urNp linux-2.6.32.45/fs/binfmt_aout.c linux-2.6.32.45/fs/binfmt_aout.c
39216--- linux-2.6.32.45/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39217+++ linux-2.6.32.45/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39218@@ -16,6 +16,7 @@
39219 #include <linux/string.h>
39220 #include <linux/fs.h>
39221 #include <linux/file.h>
39222+#include <linux/security.h>
39223 #include <linux/stat.h>
39224 #include <linux/fcntl.h>
39225 #include <linux/ptrace.h>
39226@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39227 #endif
39228 # define START_STACK(u) (u.start_stack)
39229
39230+ memset(&dump, 0, sizeof(dump));
39231+
39232 fs = get_fs();
39233 set_fs(KERNEL_DS);
39234 has_dumped = 1;
39235@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39236
39237 /* If the size of the dump file exceeds the rlimit, then see what would happen
39238 if we wrote the stack, but not the data area. */
39239+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39240 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39241 dump.u_dsize = 0;
39242
39243 /* Make sure we have enough room to write the stack and data areas. */
39244+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39245 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39246 dump.u_ssize = 0;
39247
39248@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39249 dump_size = dump.u_ssize << PAGE_SHIFT;
39250 DUMP_WRITE(dump_start,dump_size);
39251 }
39252-/* Finally dump the task struct. Not be used by gdb, but could be useful */
39253- set_fs(KERNEL_DS);
39254- DUMP_WRITE(current,sizeof(*current));
39255+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39256 end_coredump:
39257 set_fs(fs);
39258 return has_dumped;
39259@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39260 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39261 if (rlim >= RLIM_INFINITY)
39262 rlim = ~0;
39263+
39264+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39265 if (ex.a_data + ex.a_bss > rlim)
39266 return -ENOMEM;
39267
39268@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39269 install_exec_creds(bprm);
39270 current->flags &= ~PF_FORKNOEXEC;
39271
39272+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39273+ current->mm->pax_flags = 0UL;
39274+#endif
39275+
39276+#ifdef CONFIG_PAX_PAGEEXEC
39277+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39278+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39279+
39280+#ifdef CONFIG_PAX_EMUTRAMP
39281+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39282+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39283+#endif
39284+
39285+#ifdef CONFIG_PAX_MPROTECT
39286+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39287+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39288+#endif
39289+
39290+ }
39291+#endif
39292+
39293 if (N_MAGIC(ex) == OMAGIC) {
39294 unsigned long text_addr, map_size;
39295 loff_t pos;
39296@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39297
39298 down_write(&current->mm->mmap_sem);
39299 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39300- PROT_READ | PROT_WRITE | PROT_EXEC,
39301+ PROT_READ | PROT_WRITE,
39302 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39303 fd_offset + ex.a_text);
39304 up_write(&current->mm->mmap_sem);
39305diff -urNp linux-2.6.32.45/fs/binfmt_elf.c linux-2.6.32.45/fs/binfmt_elf.c
39306--- linux-2.6.32.45/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39307+++ linux-2.6.32.45/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39308@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39309 #define elf_core_dump NULL
39310 #endif
39311
39312+#ifdef CONFIG_PAX_MPROTECT
39313+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39314+#endif
39315+
39316 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39317 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39318 #else
39319@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39320 .load_binary = load_elf_binary,
39321 .load_shlib = load_elf_library,
39322 .core_dump = elf_core_dump,
39323+
39324+#ifdef CONFIG_PAX_MPROTECT
39325+ .handle_mprotect= elf_handle_mprotect,
39326+#endif
39327+
39328 .min_coredump = ELF_EXEC_PAGESIZE,
39329 .hasvdso = 1
39330 };
39331@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39332
39333 static int set_brk(unsigned long start, unsigned long end)
39334 {
39335+ unsigned long e = end;
39336+
39337 start = ELF_PAGEALIGN(start);
39338 end = ELF_PAGEALIGN(end);
39339 if (end > start) {
39340@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39341 if (BAD_ADDR(addr))
39342 return addr;
39343 }
39344- current->mm->start_brk = current->mm->brk = end;
39345+ current->mm->start_brk = current->mm->brk = e;
39346 return 0;
39347 }
39348
39349@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39350 elf_addr_t __user *u_rand_bytes;
39351 const char *k_platform = ELF_PLATFORM;
39352 const char *k_base_platform = ELF_BASE_PLATFORM;
39353- unsigned char k_rand_bytes[16];
39354+ u32 k_rand_bytes[4];
39355 int items;
39356 elf_addr_t *elf_info;
39357 int ei_index = 0;
39358 const struct cred *cred = current_cred();
39359 struct vm_area_struct *vma;
39360+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39361+
39362+ pax_track_stack();
39363
39364 /*
39365 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39366@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39367 * Generate 16 random bytes for userspace PRNG seeding.
39368 */
39369 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39370- u_rand_bytes = (elf_addr_t __user *)
39371- STACK_ALLOC(p, sizeof(k_rand_bytes));
39372+ srandom32(k_rand_bytes[0] ^ random32());
39373+ srandom32(k_rand_bytes[1] ^ random32());
39374+ srandom32(k_rand_bytes[2] ^ random32());
39375+ srandom32(k_rand_bytes[3] ^ random32());
39376+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39377+ u_rand_bytes = (elf_addr_t __user *) p;
39378 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39379 return -EFAULT;
39380
39381@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39382 return -EFAULT;
39383 current->mm->env_end = p;
39384
39385+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39386+
39387 /* Put the elf_info on the stack in the right place. */
39388 sp = (elf_addr_t __user *)envp + 1;
39389- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39390+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39391 return -EFAULT;
39392 return 0;
39393 }
39394@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39395 {
39396 struct elf_phdr *elf_phdata;
39397 struct elf_phdr *eppnt;
39398- unsigned long load_addr = 0;
39399+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39400 int load_addr_set = 0;
39401 unsigned long last_bss = 0, elf_bss = 0;
39402- unsigned long error = ~0UL;
39403+ unsigned long error = -EINVAL;
39404 unsigned long total_size;
39405 int retval, i, size;
39406
39407@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39408 goto out_close;
39409 }
39410
39411+#ifdef CONFIG_PAX_SEGMEXEC
39412+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39413+ pax_task_size = SEGMEXEC_TASK_SIZE;
39414+#endif
39415+
39416 eppnt = elf_phdata;
39417 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39418 if (eppnt->p_type == PT_LOAD) {
39419@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39420 k = load_addr + eppnt->p_vaddr;
39421 if (BAD_ADDR(k) ||
39422 eppnt->p_filesz > eppnt->p_memsz ||
39423- eppnt->p_memsz > TASK_SIZE ||
39424- TASK_SIZE - eppnt->p_memsz < k) {
39425+ eppnt->p_memsz > pax_task_size ||
39426+ pax_task_size - eppnt->p_memsz < k) {
39427 error = -ENOMEM;
39428 goto out_close;
39429 }
39430@@ -532,6 +557,194 @@ out:
39431 return error;
39432 }
39433
39434+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39435+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39436+{
39437+ unsigned long pax_flags = 0UL;
39438+
39439+#ifdef CONFIG_PAX_PAGEEXEC
39440+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39441+ pax_flags |= MF_PAX_PAGEEXEC;
39442+#endif
39443+
39444+#ifdef CONFIG_PAX_SEGMEXEC
39445+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39446+ pax_flags |= MF_PAX_SEGMEXEC;
39447+#endif
39448+
39449+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39450+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39451+ if (nx_enabled)
39452+ pax_flags &= ~MF_PAX_SEGMEXEC;
39453+ else
39454+ pax_flags &= ~MF_PAX_PAGEEXEC;
39455+ }
39456+#endif
39457+
39458+#ifdef CONFIG_PAX_EMUTRAMP
39459+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39460+ pax_flags |= MF_PAX_EMUTRAMP;
39461+#endif
39462+
39463+#ifdef CONFIG_PAX_MPROTECT
39464+ if (elf_phdata->p_flags & PF_MPROTECT)
39465+ pax_flags |= MF_PAX_MPROTECT;
39466+#endif
39467+
39468+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39469+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39470+ pax_flags |= MF_PAX_RANDMMAP;
39471+#endif
39472+
39473+ return pax_flags;
39474+}
39475+#endif
39476+
39477+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39478+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39479+{
39480+ unsigned long pax_flags = 0UL;
39481+
39482+#ifdef CONFIG_PAX_PAGEEXEC
39483+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39484+ pax_flags |= MF_PAX_PAGEEXEC;
39485+#endif
39486+
39487+#ifdef CONFIG_PAX_SEGMEXEC
39488+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39489+ pax_flags |= MF_PAX_SEGMEXEC;
39490+#endif
39491+
39492+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39493+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39494+ if (nx_enabled)
39495+ pax_flags &= ~MF_PAX_SEGMEXEC;
39496+ else
39497+ pax_flags &= ~MF_PAX_PAGEEXEC;
39498+ }
39499+#endif
39500+
39501+#ifdef CONFIG_PAX_EMUTRAMP
39502+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39503+ pax_flags |= MF_PAX_EMUTRAMP;
39504+#endif
39505+
39506+#ifdef CONFIG_PAX_MPROTECT
39507+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39508+ pax_flags |= MF_PAX_MPROTECT;
39509+#endif
39510+
39511+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39512+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39513+ pax_flags |= MF_PAX_RANDMMAP;
39514+#endif
39515+
39516+ return pax_flags;
39517+}
39518+#endif
39519+
39520+#ifdef CONFIG_PAX_EI_PAX
39521+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39522+{
39523+ unsigned long pax_flags = 0UL;
39524+
39525+#ifdef CONFIG_PAX_PAGEEXEC
39526+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39527+ pax_flags |= MF_PAX_PAGEEXEC;
39528+#endif
39529+
39530+#ifdef CONFIG_PAX_SEGMEXEC
39531+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39532+ pax_flags |= MF_PAX_SEGMEXEC;
39533+#endif
39534+
39535+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39536+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39537+ if (nx_enabled)
39538+ pax_flags &= ~MF_PAX_SEGMEXEC;
39539+ else
39540+ pax_flags &= ~MF_PAX_PAGEEXEC;
39541+ }
39542+#endif
39543+
39544+#ifdef CONFIG_PAX_EMUTRAMP
39545+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39546+ pax_flags |= MF_PAX_EMUTRAMP;
39547+#endif
39548+
39549+#ifdef CONFIG_PAX_MPROTECT
39550+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39551+ pax_flags |= MF_PAX_MPROTECT;
39552+#endif
39553+
39554+#ifdef CONFIG_PAX_ASLR
39555+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39556+ pax_flags |= MF_PAX_RANDMMAP;
39557+#endif
39558+
39559+ return pax_flags;
39560+}
39561+#endif
39562+
39563+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39564+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39565+{
39566+ unsigned long pax_flags = 0UL;
39567+
39568+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39569+ unsigned long i;
39570+ int found_flags = 0;
39571+#endif
39572+
39573+#ifdef CONFIG_PAX_EI_PAX
39574+ pax_flags = pax_parse_ei_pax(elf_ex);
39575+#endif
39576+
39577+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39578+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39579+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39580+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39581+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39582+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39583+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39584+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39585+ return -EINVAL;
39586+
39587+#ifdef CONFIG_PAX_SOFTMODE
39588+ if (pax_softmode)
39589+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39590+ else
39591+#endif
39592+
39593+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39594+ found_flags = 1;
39595+ break;
39596+ }
39597+#endif
39598+
39599+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39600+ if (found_flags == 0) {
39601+ struct elf_phdr phdr;
39602+ memset(&phdr, 0, sizeof(phdr));
39603+ phdr.p_flags = PF_NOEMUTRAMP;
39604+#ifdef CONFIG_PAX_SOFTMODE
39605+ if (pax_softmode)
39606+ pax_flags = pax_parse_softmode(&phdr);
39607+ else
39608+#endif
39609+ pax_flags = pax_parse_hardmode(&phdr);
39610+ }
39611+#endif
39612+
39613+
39614+ if (0 > pax_check_flags(&pax_flags))
39615+ return -EINVAL;
39616+
39617+ current->mm->pax_flags = pax_flags;
39618+ return 0;
39619+}
39620+#endif
39621+
39622 /*
39623 * These are the functions used to load ELF style executables and shared
39624 * libraries. There is no binary dependent code anywhere else.
39625@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39626 {
39627 unsigned int random_variable = 0;
39628
39629+#ifdef CONFIG_PAX_RANDUSTACK
39630+ if (randomize_va_space)
39631+ return stack_top - current->mm->delta_stack;
39632+#endif
39633+
39634 if ((current->flags & PF_RANDOMIZE) &&
39635 !(current->personality & ADDR_NO_RANDOMIZE)) {
39636 random_variable = get_random_int() & STACK_RND_MASK;
39637@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39638 unsigned long load_addr = 0, load_bias = 0;
39639 int load_addr_set = 0;
39640 char * elf_interpreter = NULL;
39641- unsigned long error;
39642+ unsigned long error = 0;
39643 struct elf_phdr *elf_ppnt, *elf_phdata;
39644 unsigned long elf_bss, elf_brk;
39645 int retval, i;
39646@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39647 unsigned long start_code, end_code, start_data, end_data;
39648 unsigned long reloc_func_desc = 0;
39649 int executable_stack = EXSTACK_DEFAULT;
39650- unsigned long def_flags = 0;
39651 struct {
39652 struct elfhdr elf_ex;
39653 struct elfhdr interp_elf_ex;
39654 } *loc;
39655+ unsigned long pax_task_size = TASK_SIZE;
39656
39657 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39658 if (!loc) {
39659@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39660
39661 /* OK, This is the point of no return */
39662 current->flags &= ~PF_FORKNOEXEC;
39663- current->mm->def_flags = def_flags;
39664+
39665+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39666+ current->mm->pax_flags = 0UL;
39667+#endif
39668+
39669+#ifdef CONFIG_PAX_DLRESOLVE
39670+ current->mm->call_dl_resolve = 0UL;
39671+#endif
39672+
39673+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
39674+ current->mm->call_syscall = 0UL;
39675+#endif
39676+
39677+#ifdef CONFIG_PAX_ASLR
39678+ current->mm->delta_mmap = 0UL;
39679+ current->mm->delta_stack = 0UL;
39680+#endif
39681+
39682+ current->mm->def_flags = 0;
39683+
39684+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39685+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
39686+ send_sig(SIGKILL, current, 0);
39687+ goto out_free_dentry;
39688+ }
39689+#endif
39690+
39691+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
39692+ pax_set_initial_flags(bprm);
39693+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
39694+ if (pax_set_initial_flags_func)
39695+ (pax_set_initial_flags_func)(bprm);
39696+#endif
39697+
39698+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
39699+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
39700+ current->mm->context.user_cs_limit = PAGE_SIZE;
39701+ current->mm->def_flags |= VM_PAGEEXEC;
39702+ }
39703+#endif
39704+
39705+#ifdef CONFIG_PAX_SEGMEXEC
39706+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
39707+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
39708+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
39709+ pax_task_size = SEGMEXEC_TASK_SIZE;
39710+ }
39711+#endif
39712+
39713+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
39714+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39715+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
39716+ put_cpu();
39717+ }
39718+#endif
39719
39720 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
39721 may depend on the personality. */
39722 SET_PERSONALITY(loc->elf_ex);
39723+
39724+#ifdef CONFIG_PAX_ASLR
39725+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
39726+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
39727+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
39728+ }
39729+#endif
39730+
39731+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
39732+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39733+ executable_stack = EXSTACK_DISABLE_X;
39734+ current->personality &= ~READ_IMPLIES_EXEC;
39735+ } else
39736+#endif
39737+
39738 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
39739 current->personality |= READ_IMPLIES_EXEC;
39740
39741@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
39742 #else
39743 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
39744 #endif
39745+
39746+#ifdef CONFIG_PAX_RANDMMAP
39747+ /* PaX: randomize base address at the default exe base if requested */
39748+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
39749+#ifdef CONFIG_SPARC64
39750+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
39751+#else
39752+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
39753+#endif
39754+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
39755+ elf_flags |= MAP_FIXED;
39756+ }
39757+#endif
39758+
39759 }
39760
39761 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
39762@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
39763 * allowed task size. Note that p_filesz must always be
39764 * <= p_memsz so it is only necessary to check p_memsz.
39765 */
39766- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39767- elf_ppnt->p_memsz > TASK_SIZE ||
39768- TASK_SIZE - elf_ppnt->p_memsz < k) {
39769+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
39770+ elf_ppnt->p_memsz > pax_task_size ||
39771+ pax_task_size - elf_ppnt->p_memsz < k) {
39772 /* set_brk can never work. Avoid overflows. */
39773 send_sig(SIGKILL, current, 0);
39774 retval = -EINVAL;
39775@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
39776 start_data += load_bias;
39777 end_data += load_bias;
39778
39779+#ifdef CONFIG_PAX_RANDMMAP
39780+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
39781+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
39782+#endif
39783+
39784 /* Calling set_brk effectively mmaps the pages that we need
39785 * for the bss and break sections. We must do this before
39786 * mapping in the interpreter, to make sure it doesn't wind
39787@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
39788 goto out_free_dentry;
39789 }
39790 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
39791- send_sig(SIGSEGV, current, 0);
39792- retval = -EFAULT; /* Nobody gets to see this, but.. */
39793- goto out_free_dentry;
39794+ /*
39795+ * This bss-zeroing can fail if the ELF
39796+ * file specifies odd protections. So
39797+ * we don't check the return value
39798+ */
39799 }
39800
39801 if (elf_interpreter) {
39802@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
39803 unsigned long n = off;
39804 if (n > PAGE_SIZE)
39805 n = PAGE_SIZE;
39806- if (!dump_write(file, buf, n))
39807+ if (!dump_write(file, buf, n)) {
39808+ free_page((unsigned long)buf);
39809 return 0;
39810+ }
39811 off -= n;
39812 }
39813 free_page((unsigned long)buf);
39814@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
39815 * Decide what to dump of a segment, part, all or none.
39816 */
39817 static unsigned long vma_dump_size(struct vm_area_struct *vma,
39818- unsigned long mm_flags)
39819+ unsigned long mm_flags, long signr)
39820 {
39821 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
39822
39823@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
39824 if (vma->vm_file == NULL)
39825 return 0;
39826
39827- if (FILTER(MAPPED_PRIVATE))
39828+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
39829 goto whole;
39830
39831 /*
39832@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
39833 #undef DUMP_WRITE
39834
39835 #define DUMP_WRITE(addr, nr) \
39836+ do { \
39837+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
39838 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
39839- goto end_coredump;
39840+ goto end_coredump; \
39841+ } while (0);
39842
39843 static void fill_elf_header(struct elfhdr *elf, int segs,
39844 u16 machine, u32 flags, u8 osabi)
39845@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
39846 {
39847 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
39848 int i = 0;
39849- do
39850+ do {
39851 i += 2;
39852- while (auxv[i - 2] != AT_NULL);
39853+ } while (auxv[i - 2] != AT_NULL);
39854 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
39855 }
39856
39857@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
39858 phdr.p_offset = offset;
39859 phdr.p_vaddr = vma->vm_start;
39860 phdr.p_paddr = 0;
39861- phdr.p_filesz = vma_dump_size(vma, mm_flags);
39862+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
39863 phdr.p_memsz = vma->vm_end - vma->vm_start;
39864 offset += phdr.p_filesz;
39865 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
39866@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
39867 unsigned long addr;
39868 unsigned long end;
39869
39870- end = vma->vm_start + vma_dump_size(vma, mm_flags);
39871+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
39872
39873 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
39874 struct page *page;
39875@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
39876 page = get_dump_page(addr);
39877 if (page) {
39878 void *kaddr = kmap(page);
39879+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
39880 stop = ((size += PAGE_SIZE) > limit) ||
39881 !dump_write(file, kaddr, PAGE_SIZE);
39882 kunmap(page);
39883@@ -2042,6 +2356,97 @@ out:
39884
39885 #endif /* USE_ELF_CORE_DUMP */
39886
39887+#ifdef CONFIG_PAX_MPROTECT
39888+/* PaX: non-PIC ELF libraries need relocations on their executable segments
39889+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
39890+ * we'll remove VM_MAYWRITE for good on RELRO segments.
39891+ *
39892+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
39893+ * basis because we want to allow the common case and not the special ones.
39894+ */
39895+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
39896+{
39897+ struct elfhdr elf_h;
39898+ struct elf_phdr elf_p;
39899+ unsigned long i;
39900+ unsigned long oldflags;
39901+ bool is_textrel_rw, is_textrel_rx, is_relro;
39902+
39903+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
39904+ return;
39905+
39906+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
39907+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
39908+
39909+#ifdef CONFIG_PAX_ELFRELOCS
39910+ /* possible TEXTREL */
39911+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
39912+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
39913+#else
39914+ is_textrel_rw = false;
39915+ is_textrel_rx = false;
39916+#endif
39917+
39918+ /* possible RELRO */
39919+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
39920+
39921+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
39922+ return;
39923+
39924+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
39925+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
39926+
39927+#ifdef CONFIG_PAX_ETEXECRELOCS
39928+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39929+#else
39930+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
39931+#endif
39932+
39933+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
39934+ !elf_check_arch(&elf_h) ||
39935+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
39936+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
39937+ return;
39938+
39939+ for (i = 0UL; i < elf_h.e_phnum; i++) {
39940+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
39941+ return;
39942+ switch (elf_p.p_type) {
39943+ case PT_DYNAMIC:
39944+ if (!is_textrel_rw && !is_textrel_rx)
39945+ continue;
39946+ i = 0UL;
39947+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
39948+ elf_dyn dyn;
39949+
39950+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
39951+ return;
39952+ if (dyn.d_tag == DT_NULL)
39953+ return;
39954+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
39955+ gr_log_textrel(vma);
39956+ if (is_textrel_rw)
39957+ vma->vm_flags |= VM_MAYWRITE;
39958+ else
39959+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
39960+ vma->vm_flags &= ~VM_MAYWRITE;
39961+ return;
39962+ }
39963+ i++;
39964+ }
39965+ return;
39966+
39967+ case PT_GNU_RELRO:
39968+ if (!is_relro)
39969+ continue;
39970+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
39971+ vma->vm_flags &= ~VM_MAYWRITE;
39972+ return;
39973+ }
39974+ }
39975+}
39976+#endif
39977+
39978 static int __init init_elf_binfmt(void)
39979 {
39980 return register_binfmt(&elf_format);
39981diff -urNp linux-2.6.32.45/fs/binfmt_flat.c linux-2.6.32.45/fs/binfmt_flat.c
39982--- linux-2.6.32.45/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
39983+++ linux-2.6.32.45/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
39984@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
39985 realdatastart = (unsigned long) -ENOMEM;
39986 printk("Unable to allocate RAM for process data, errno %d\n",
39987 (int)-realdatastart);
39988+ down_write(&current->mm->mmap_sem);
39989 do_munmap(current->mm, textpos, text_len);
39990+ up_write(&current->mm->mmap_sem);
39991 ret = realdatastart;
39992 goto err;
39993 }
39994@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
39995 }
39996 if (IS_ERR_VALUE(result)) {
39997 printk("Unable to read data+bss, errno %d\n", (int)-result);
39998+ down_write(&current->mm->mmap_sem);
39999 do_munmap(current->mm, textpos, text_len);
40000 do_munmap(current->mm, realdatastart, data_len + extra);
40001+ up_write(&current->mm->mmap_sem);
40002 ret = result;
40003 goto err;
40004 }
40005@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
40006 }
40007 if (IS_ERR_VALUE(result)) {
40008 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40009+ down_write(&current->mm->mmap_sem);
40010 do_munmap(current->mm, textpos, text_len + data_len + extra +
40011 MAX_SHARED_LIBS * sizeof(unsigned long));
40012+ up_write(&current->mm->mmap_sem);
40013 ret = result;
40014 goto err;
40015 }
40016diff -urNp linux-2.6.32.45/fs/bio.c linux-2.6.32.45/fs/bio.c
40017--- linux-2.6.32.45/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
40018+++ linux-2.6.32.45/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
40019@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
40020
40021 i = 0;
40022 while (i < bio_slab_nr) {
40023- struct bio_slab *bslab = &bio_slabs[i];
40024+ bslab = &bio_slabs[i];
40025
40026 if (!bslab->slab && entry == -1)
40027 entry = i;
40028@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
40029 const int read = bio_data_dir(bio) == READ;
40030 struct bio_map_data *bmd = bio->bi_private;
40031 int i;
40032- char *p = bmd->sgvecs[0].iov_base;
40033+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
40034
40035 __bio_for_each_segment(bvec, bio, i, 0) {
40036 char *addr = page_address(bvec->bv_page);
40037diff -urNp linux-2.6.32.45/fs/block_dev.c linux-2.6.32.45/fs/block_dev.c
40038--- linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:35:29.000000000 -0400
40039+++ linux-2.6.32.45/fs/block_dev.c 2011-08-09 18:34:00.000000000 -0400
40040@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
40041 else if (bdev->bd_contains == bdev)
40042 res = 0; /* is a whole device which isn't held */
40043
40044- else if (bdev->bd_contains->bd_holder == bd_claim)
40045+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
40046 res = 0; /* is a partition of a device that is being partitioned */
40047 else if (bdev->bd_contains->bd_holder != NULL)
40048 res = -EBUSY; /* is a partition of a held device */
40049diff -urNp linux-2.6.32.45/fs/btrfs/ctree.c linux-2.6.32.45/fs/btrfs/ctree.c
40050--- linux-2.6.32.45/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
40051+++ linux-2.6.32.45/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
40052@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
40053 free_extent_buffer(buf);
40054 add_root_to_dirty_list(root);
40055 } else {
40056- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40057- parent_start = parent->start;
40058- else
40059+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40060+ if (parent)
40061+ parent_start = parent->start;
40062+ else
40063+ parent_start = 0;
40064+ } else
40065 parent_start = 0;
40066
40067 WARN_ON(trans->transid != btrfs_header_generation(parent));
40068@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
40069
40070 ret = 0;
40071 if (slot == 0) {
40072- struct btrfs_disk_key disk_key;
40073 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
40074 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
40075 }
40076diff -urNp linux-2.6.32.45/fs/btrfs/disk-io.c linux-2.6.32.45/fs/btrfs/disk-io.c
40077--- linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
40078+++ linux-2.6.32.45/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
40079@@ -39,7 +39,7 @@
40080 #include "tree-log.h"
40081 #include "free-space-cache.h"
40082
40083-static struct extent_io_ops btree_extent_io_ops;
40084+static const struct extent_io_ops btree_extent_io_ops;
40085 static void end_workqueue_fn(struct btrfs_work *work);
40086 static void free_fs_root(struct btrfs_root *root);
40087
40088@@ -2607,7 +2607,7 @@ out:
40089 return 0;
40090 }
40091
40092-static struct extent_io_ops btree_extent_io_ops = {
40093+static const struct extent_io_ops btree_extent_io_ops = {
40094 .write_cache_pages_lock_hook = btree_lock_page_hook,
40095 .readpage_end_io_hook = btree_readpage_end_io_hook,
40096 .submit_bio_hook = btree_submit_bio_hook,
40097diff -urNp linux-2.6.32.45/fs/btrfs/extent_io.h linux-2.6.32.45/fs/btrfs/extent_io.h
40098--- linux-2.6.32.45/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
40099+++ linux-2.6.32.45/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
40100@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
40101 struct bio *bio, int mirror_num,
40102 unsigned long bio_flags);
40103 struct extent_io_ops {
40104- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
40105+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
40106 u64 start, u64 end, int *page_started,
40107 unsigned long *nr_written);
40108- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
40109- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
40110+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
40111+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
40112 extent_submit_bio_hook_t *submit_bio_hook;
40113- int (*merge_bio_hook)(struct page *page, unsigned long offset,
40114+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
40115 size_t size, struct bio *bio,
40116 unsigned long bio_flags);
40117- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
40118- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
40119+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
40120+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
40121 u64 start, u64 end,
40122 struct extent_state *state);
40123- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
40124+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
40125 u64 start, u64 end,
40126 struct extent_state *state);
40127- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40128+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40129 struct extent_state *state);
40130- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40131+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40132 struct extent_state *state, int uptodate);
40133- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
40134+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
40135 unsigned long old, unsigned long bits);
40136- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
40137+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
40138 unsigned long bits);
40139- int (*merge_extent_hook)(struct inode *inode,
40140+ int (* const merge_extent_hook)(struct inode *inode,
40141 struct extent_state *new,
40142 struct extent_state *other);
40143- int (*split_extent_hook)(struct inode *inode,
40144+ int (* const split_extent_hook)(struct inode *inode,
40145 struct extent_state *orig, u64 split);
40146- int (*write_cache_pages_lock_hook)(struct page *page);
40147+ int (* const write_cache_pages_lock_hook)(struct page *page);
40148 };
40149
40150 struct extent_io_tree {
40151@@ -88,7 +88,7 @@ struct extent_io_tree {
40152 u64 dirty_bytes;
40153 spinlock_t lock;
40154 spinlock_t buffer_lock;
40155- struct extent_io_ops *ops;
40156+ const struct extent_io_ops *ops;
40157 };
40158
40159 struct extent_state {
40160diff -urNp linux-2.6.32.45/fs/btrfs/extent-tree.c linux-2.6.32.45/fs/btrfs/extent-tree.c
40161--- linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
40162+++ linux-2.6.32.45/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
40163@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
40164 u64 group_start = group->key.objectid;
40165 new_extents = kmalloc(sizeof(*new_extents),
40166 GFP_NOFS);
40167+ if (!new_extents) {
40168+ ret = -ENOMEM;
40169+ goto out;
40170+ }
40171 nr_extents = 1;
40172 ret = get_new_locations(reloc_inode,
40173 extent_key,
40174diff -urNp linux-2.6.32.45/fs/btrfs/free-space-cache.c linux-2.6.32.45/fs/btrfs/free-space-cache.c
40175--- linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
40176+++ linux-2.6.32.45/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
40177@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
40178
40179 while(1) {
40180 if (entry->bytes < bytes || entry->offset < min_start) {
40181- struct rb_node *node;
40182-
40183 node = rb_next(&entry->offset_index);
40184 if (!node)
40185 break;
40186@@ -1226,7 +1224,7 @@ again:
40187 */
40188 while (entry->bitmap || found_bitmap ||
40189 (!entry->bitmap && entry->bytes < min_bytes)) {
40190- struct rb_node *node = rb_next(&entry->offset_index);
40191+ node = rb_next(&entry->offset_index);
40192
40193 if (entry->bitmap && entry->bytes > bytes + empty_size) {
40194 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40195diff -urNp linux-2.6.32.45/fs/btrfs/inode.c linux-2.6.32.45/fs/btrfs/inode.c
40196--- linux-2.6.32.45/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40197+++ linux-2.6.32.45/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40198@@ -63,7 +63,7 @@ static const struct inode_operations btr
40199 static const struct address_space_operations btrfs_aops;
40200 static const struct address_space_operations btrfs_symlink_aops;
40201 static const struct file_operations btrfs_dir_file_operations;
40202-static struct extent_io_ops btrfs_extent_io_ops;
40203+static const struct extent_io_ops btrfs_extent_io_ops;
40204
40205 static struct kmem_cache *btrfs_inode_cachep;
40206 struct kmem_cache *btrfs_trans_handle_cachep;
40207@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40208 1, 0, NULL, GFP_NOFS);
40209 while (start < end) {
40210 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40211+ BUG_ON(!async_cow);
40212 async_cow->inode = inode;
40213 async_cow->root = root;
40214 async_cow->locked_page = locked_page;
40215@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40216 inline_size = btrfs_file_extent_inline_item_len(leaf,
40217 btrfs_item_nr(leaf, path->slots[0]));
40218 tmp = kmalloc(inline_size, GFP_NOFS);
40219+ if (!tmp)
40220+ return -ENOMEM;
40221 ptr = btrfs_file_extent_inline_start(item);
40222
40223 read_extent_buffer(leaf, tmp, ptr, inline_size);
40224@@ -5410,7 +5413,7 @@ fail:
40225 return -ENOMEM;
40226 }
40227
40228-static int btrfs_getattr(struct vfsmount *mnt,
40229+int btrfs_getattr(struct vfsmount *mnt,
40230 struct dentry *dentry, struct kstat *stat)
40231 {
40232 struct inode *inode = dentry->d_inode;
40233@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40234 return 0;
40235 }
40236
40237+EXPORT_SYMBOL(btrfs_getattr);
40238+
40239+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40240+{
40241+ return BTRFS_I(inode)->root->anon_super.s_dev;
40242+}
40243+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40244+
40245 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40246 struct inode *new_dir, struct dentry *new_dentry)
40247 {
40248@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40249 .fsync = btrfs_sync_file,
40250 };
40251
40252-static struct extent_io_ops btrfs_extent_io_ops = {
40253+static const struct extent_io_ops btrfs_extent_io_ops = {
40254 .fill_delalloc = run_delalloc_range,
40255 .submit_bio_hook = btrfs_submit_bio_hook,
40256 .merge_bio_hook = btrfs_merge_bio_hook,
40257diff -urNp linux-2.6.32.45/fs/btrfs/relocation.c linux-2.6.32.45/fs/btrfs/relocation.c
40258--- linux-2.6.32.45/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40259+++ linux-2.6.32.45/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40260@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40261 }
40262 spin_unlock(&rc->reloc_root_tree.lock);
40263
40264- BUG_ON((struct btrfs_root *)node->data != root);
40265+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40266
40267 if (!del) {
40268 spin_lock(&rc->reloc_root_tree.lock);
40269diff -urNp linux-2.6.32.45/fs/btrfs/sysfs.c linux-2.6.32.45/fs/btrfs/sysfs.c
40270--- linux-2.6.32.45/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40271+++ linux-2.6.32.45/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40272@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40273 complete(&root->kobj_unregister);
40274 }
40275
40276-static struct sysfs_ops btrfs_super_attr_ops = {
40277+static const struct sysfs_ops btrfs_super_attr_ops = {
40278 .show = btrfs_super_attr_show,
40279 .store = btrfs_super_attr_store,
40280 };
40281
40282-static struct sysfs_ops btrfs_root_attr_ops = {
40283+static const struct sysfs_ops btrfs_root_attr_ops = {
40284 .show = btrfs_root_attr_show,
40285 .store = btrfs_root_attr_store,
40286 };
40287diff -urNp linux-2.6.32.45/fs/buffer.c linux-2.6.32.45/fs/buffer.c
40288--- linux-2.6.32.45/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40289+++ linux-2.6.32.45/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40290@@ -25,6 +25,7 @@
40291 #include <linux/percpu.h>
40292 #include <linux/slab.h>
40293 #include <linux/capability.h>
40294+#include <linux/security.h>
40295 #include <linux/blkdev.h>
40296 #include <linux/file.h>
40297 #include <linux/quotaops.h>
40298diff -urNp linux-2.6.32.45/fs/cachefiles/bind.c linux-2.6.32.45/fs/cachefiles/bind.c
40299--- linux-2.6.32.45/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40300+++ linux-2.6.32.45/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40301@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40302 args);
40303
40304 /* start by checking things over */
40305- ASSERT(cache->fstop_percent >= 0 &&
40306- cache->fstop_percent < cache->fcull_percent &&
40307+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40308 cache->fcull_percent < cache->frun_percent &&
40309 cache->frun_percent < 100);
40310
40311- ASSERT(cache->bstop_percent >= 0 &&
40312- cache->bstop_percent < cache->bcull_percent &&
40313+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40314 cache->bcull_percent < cache->brun_percent &&
40315 cache->brun_percent < 100);
40316
40317diff -urNp linux-2.6.32.45/fs/cachefiles/daemon.c linux-2.6.32.45/fs/cachefiles/daemon.c
40318--- linux-2.6.32.45/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40319+++ linux-2.6.32.45/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40320@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40321 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40322 return -EIO;
40323
40324- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40325+ if (datalen > PAGE_SIZE - 1)
40326 return -EOPNOTSUPP;
40327
40328 /* drag the command string into the kernel so we can parse it */
40329@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40330 if (args[0] != '%' || args[1] != '\0')
40331 return -EINVAL;
40332
40333- if (fstop < 0 || fstop >= cache->fcull_percent)
40334+ if (fstop >= cache->fcull_percent)
40335 return cachefiles_daemon_range_error(cache, args);
40336
40337 cache->fstop_percent = fstop;
40338@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40339 if (args[0] != '%' || args[1] != '\0')
40340 return -EINVAL;
40341
40342- if (bstop < 0 || bstop >= cache->bcull_percent)
40343+ if (bstop >= cache->bcull_percent)
40344 return cachefiles_daemon_range_error(cache, args);
40345
40346 cache->bstop_percent = bstop;
40347diff -urNp linux-2.6.32.45/fs/cachefiles/internal.h linux-2.6.32.45/fs/cachefiles/internal.h
40348--- linux-2.6.32.45/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40349+++ linux-2.6.32.45/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40350@@ -56,7 +56,7 @@ struct cachefiles_cache {
40351 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40352 struct rb_root active_nodes; /* active nodes (can't be culled) */
40353 rwlock_t active_lock; /* lock for active_nodes */
40354- atomic_t gravecounter; /* graveyard uniquifier */
40355+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40356 unsigned frun_percent; /* when to stop culling (% files) */
40357 unsigned fcull_percent; /* when to start culling (% files) */
40358 unsigned fstop_percent; /* when to stop allocating (% files) */
40359@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40360 * proc.c
40361 */
40362 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40363-extern atomic_t cachefiles_lookup_histogram[HZ];
40364-extern atomic_t cachefiles_mkdir_histogram[HZ];
40365-extern atomic_t cachefiles_create_histogram[HZ];
40366+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40367+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40368+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40369
40370 extern int __init cachefiles_proc_init(void);
40371 extern void cachefiles_proc_cleanup(void);
40372 static inline
40373-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40374+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40375 {
40376 unsigned long jif = jiffies - start_jif;
40377 if (jif >= HZ)
40378 jif = HZ - 1;
40379- atomic_inc(&histogram[jif]);
40380+ atomic_inc_unchecked(&histogram[jif]);
40381 }
40382
40383 #else
40384diff -urNp linux-2.6.32.45/fs/cachefiles/namei.c linux-2.6.32.45/fs/cachefiles/namei.c
40385--- linux-2.6.32.45/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40386+++ linux-2.6.32.45/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40387@@ -250,7 +250,7 @@ try_again:
40388 /* first step is to make up a grave dentry in the graveyard */
40389 sprintf(nbuffer, "%08x%08x",
40390 (uint32_t) get_seconds(),
40391- (uint32_t) atomic_inc_return(&cache->gravecounter));
40392+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40393
40394 /* do the multiway lock magic */
40395 trap = lock_rename(cache->graveyard, dir);
40396diff -urNp linux-2.6.32.45/fs/cachefiles/proc.c linux-2.6.32.45/fs/cachefiles/proc.c
40397--- linux-2.6.32.45/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40398+++ linux-2.6.32.45/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40399@@ -14,9 +14,9 @@
40400 #include <linux/seq_file.h>
40401 #include "internal.h"
40402
40403-atomic_t cachefiles_lookup_histogram[HZ];
40404-atomic_t cachefiles_mkdir_histogram[HZ];
40405-atomic_t cachefiles_create_histogram[HZ];
40406+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40407+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40408+atomic_unchecked_t cachefiles_create_histogram[HZ];
40409
40410 /*
40411 * display the latency histogram
40412@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40413 return 0;
40414 default:
40415 index = (unsigned long) v - 3;
40416- x = atomic_read(&cachefiles_lookup_histogram[index]);
40417- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40418- z = atomic_read(&cachefiles_create_histogram[index]);
40419+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40420+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40421+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40422 if (x == 0 && y == 0 && z == 0)
40423 return 0;
40424
40425diff -urNp linux-2.6.32.45/fs/cachefiles/rdwr.c linux-2.6.32.45/fs/cachefiles/rdwr.c
40426--- linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40427+++ linux-2.6.32.45/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40428@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40429 old_fs = get_fs();
40430 set_fs(KERNEL_DS);
40431 ret = file->f_op->write(
40432- file, (const void __user *) data, len, &pos);
40433+ file, (__force const void __user *) data, len, &pos);
40434 set_fs(old_fs);
40435 kunmap(page);
40436 if (ret != len)
40437diff -urNp linux-2.6.32.45/fs/cifs/cifs_debug.c linux-2.6.32.45/fs/cifs/cifs_debug.c
40438--- linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40439+++ linux-2.6.32.45/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40440@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40441 tcon = list_entry(tmp3,
40442 struct cifsTconInfo,
40443 tcon_list);
40444- atomic_set(&tcon->num_smbs_sent, 0);
40445- atomic_set(&tcon->num_writes, 0);
40446- atomic_set(&tcon->num_reads, 0);
40447- atomic_set(&tcon->num_oplock_brks, 0);
40448- atomic_set(&tcon->num_opens, 0);
40449- atomic_set(&tcon->num_posixopens, 0);
40450- atomic_set(&tcon->num_posixmkdirs, 0);
40451- atomic_set(&tcon->num_closes, 0);
40452- atomic_set(&tcon->num_deletes, 0);
40453- atomic_set(&tcon->num_mkdirs, 0);
40454- atomic_set(&tcon->num_rmdirs, 0);
40455- atomic_set(&tcon->num_renames, 0);
40456- atomic_set(&tcon->num_t2renames, 0);
40457- atomic_set(&tcon->num_ffirst, 0);
40458- atomic_set(&tcon->num_fnext, 0);
40459- atomic_set(&tcon->num_fclose, 0);
40460- atomic_set(&tcon->num_hardlinks, 0);
40461- atomic_set(&tcon->num_symlinks, 0);
40462- atomic_set(&tcon->num_locks, 0);
40463+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40464+ atomic_set_unchecked(&tcon->num_writes, 0);
40465+ atomic_set_unchecked(&tcon->num_reads, 0);
40466+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40467+ atomic_set_unchecked(&tcon->num_opens, 0);
40468+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40469+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40470+ atomic_set_unchecked(&tcon->num_closes, 0);
40471+ atomic_set_unchecked(&tcon->num_deletes, 0);
40472+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40473+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40474+ atomic_set_unchecked(&tcon->num_renames, 0);
40475+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40476+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40477+ atomic_set_unchecked(&tcon->num_fnext, 0);
40478+ atomic_set_unchecked(&tcon->num_fclose, 0);
40479+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40480+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40481+ atomic_set_unchecked(&tcon->num_locks, 0);
40482 }
40483 }
40484 }
40485@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40486 if (tcon->need_reconnect)
40487 seq_puts(m, "\tDISCONNECTED ");
40488 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40489- atomic_read(&tcon->num_smbs_sent),
40490- atomic_read(&tcon->num_oplock_brks));
40491+ atomic_read_unchecked(&tcon->num_smbs_sent),
40492+ atomic_read_unchecked(&tcon->num_oplock_brks));
40493 seq_printf(m, "\nReads: %d Bytes: %lld",
40494- atomic_read(&tcon->num_reads),
40495+ atomic_read_unchecked(&tcon->num_reads),
40496 (long long)(tcon->bytes_read));
40497 seq_printf(m, "\nWrites: %d Bytes: %lld",
40498- atomic_read(&tcon->num_writes),
40499+ atomic_read_unchecked(&tcon->num_writes),
40500 (long long)(tcon->bytes_written));
40501 seq_printf(m, "\nFlushes: %d",
40502- atomic_read(&tcon->num_flushes));
40503+ atomic_read_unchecked(&tcon->num_flushes));
40504 seq_printf(m, "\nLocks: %d HardLinks: %d "
40505 "Symlinks: %d",
40506- atomic_read(&tcon->num_locks),
40507- atomic_read(&tcon->num_hardlinks),
40508- atomic_read(&tcon->num_symlinks));
40509+ atomic_read_unchecked(&tcon->num_locks),
40510+ atomic_read_unchecked(&tcon->num_hardlinks),
40511+ atomic_read_unchecked(&tcon->num_symlinks));
40512 seq_printf(m, "\nOpens: %d Closes: %d "
40513 "Deletes: %d",
40514- atomic_read(&tcon->num_opens),
40515- atomic_read(&tcon->num_closes),
40516- atomic_read(&tcon->num_deletes));
40517+ atomic_read_unchecked(&tcon->num_opens),
40518+ atomic_read_unchecked(&tcon->num_closes),
40519+ atomic_read_unchecked(&tcon->num_deletes));
40520 seq_printf(m, "\nPosix Opens: %d "
40521 "Posix Mkdirs: %d",
40522- atomic_read(&tcon->num_posixopens),
40523- atomic_read(&tcon->num_posixmkdirs));
40524+ atomic_read_unchecked(&tcon->num_posixopens),
40525+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40526 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40527- atomic_read(&tcon->num_mkdirs),
40528- atomic_read(&tcon->num_rmdirs));
40529+ atomic_read_unchecked(&tcon->num_mkdirs),
40530+ atomic_read_unchecked(&tcon->num_rmdirs));
40531 seq_printf(m, "\nRenames: %d T2 Renames %d",
40532- atomic_read(&tcon->num_renames),
40533- atomic_read(&tcon->num_t2renames));
40534+ atomic_read_unchecked(&tcon->num_renames),
40535+ atomic_read_unchecked(&tcon->num_t2renames));
40536 seq_printf(m, "\nFindFirst: %d FNext %d "
40537 "FClose %d",
40538- atomic_read(&tcon->num_ffirst),
40539- atomic_read(&tcon->num_fnext),
40540- atomic_read(&tcon->num_fclose));
40541+ atomic_read_unchecked(&tcon->num_ffirst),
40542+ atomic_read_unchecked(&tcon->num_fnext),
40543+ atomic_read_unchecked(&tcon->num_fclose));
40544 }
40545 }
40546 }
40547diff -urNp linux-2.6.32.45/fs/cifs/cifsglob.h linux-2.6.32.45/fs/cifs/cifsglob.h
40548--- linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:35:29.000000000 -0400
40549+++ linux-2.6.32.45/fs/cifs/cifsglob.h 2011-08-09 18:34:00.000000000 -0400
40550@@ -252,28 +252,28 @@ struct cifsTconInfo {
40551 __u16 Flags; /* optional support bits */
40552 enum statusEnum tidStatus;
40553 #ifdef CONFIG_CIFS_STATS
40554- atomic_t num_smbs_sent;
40555- atomic_t num_writes;
40556- atomic_t num_reads;
40557- atomic_t num_flushes;
40558- atomic_t num_oplock_brks;
40559- atomic_t num_opens;
40560- atomic_t num_closes;
40561- atomic_t num_deletes;
40562- atomic_t num_mkdirs;
40563- atomic_t num_posixopens;
40564- atomic_t num_posixmkdirs;
40565- atomic_t num_rmdirs;
40566- atomic_t num_renames;
40567- atomic_t num_t2renames;
40568- atomic_t num_ffirst;
40569- atomic_t num_fnext;
40570- atomic_t num_fclose;
40571- atomic_t num_hardlinks;
40572- atomic_t num_symlinks;
40573- atomic_t num_locks;
40574- atomic_t num_acl_get;
40575- atomic_t num_acl_set;
40576+ atomic_unchecked_t num_smbs_sent;
40577+ atomic_unchecked_t num_writes;
40578+ atomic_unchecked_t num_reads;
40579+ atomic_unchecked_t num_flushes;
40580+ atomic_unchecked_t num_oplock_brks;
40581+ atomic_unchecked_t num_opens;
40582+ atomic_unchecked_t num_closes;
40583+ atomic_unchecked_t num_deletes;
40584+ atomic_unchecked_t num_mkdirs;
40585+ atomic_unchecked_t num_posixopens;
40586+ atomic_unchecked_t num_posixmkdirs;
40587+ atomic_unchecked_t num_rmdirs;
40588+ atomic_unchecked_t num_renames;
40589+ atomic_unchecked_t num_t2renames;
40590+ atomic_unchecked_t num_ffirst;
40591+ atomic_unchecked_t num_fnext;
40592+ atomic_unchecked_t num_fclose;
40593+ atomic_unchecked_t num_hardlinks;
40594+ atomic_unchecked_t num_symlinks;
40595+ atomic_unchecked_t num_locks;
40596+ atomic_unchecked_t num_acl_get;
40597+ atomic_unchecked_t num_acl_set;
40598 #ifdef CONFIG_CIFS_STATS2
40599 unsigned long long time_writes;
40600 unsigned long long time_reads;
40601@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40602 }
40603
40604 #ifdef CONFIG_CIFS_STATS
40605-#define cifs_stats_inc atomic_inc
40606+#define cifs_stats_inc atomic_inc_unchecked
40607
40608 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40609 unsigned int bytes)
40610diff -urNp linux-2.6.32.45/fs/cifs/link.c linux-2.6.32.45/fs/cifs/link.c
40611--- linux-2.6.32.45/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40612+++ linux-2.6.32.45/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40613@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40614
40615 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40616 {
40617- char *p = nd_get_link(nd);
40618+ const char *p = nd_get_link(nd);
40619 if (!IS_ERR(p))
40620 kfree(p);
40621 }
40622diff -urNp linux-2.6.32.45/fs/coda/cache.c linux-2.6.32.45/fs/coda/cache.c
40623--- linux-2.6.32.45/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40624+++ linux-2.6.32.45/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40625@@ -24,14 +24,14 @@
40626 #include <linux/coda_fs_i.h>
40627 #include <linux/coda_cache.h>
40628
40629-static atomic_t permission_epoch = ATOMIC_INIT(0);
40630+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40631
40632 /* replace or extend an acl cache hit */
40633 void coda_cache_enter(struct inode *inode, int mask)
40634 {
40635 struct coda_inode_info *cii = ITOC(inode);
40636
40637- cii->c_cached_epoch = atomic_read(&permission_epoch);
40638+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40639 if (cii->c_uid != current_fsuid()) {
40640 cii->c_uid = current_fsuid();
40641 cii->c_cached_perm = mask;
40642@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40643 void coda_cache_clear_inode(struct inode *inode)
40644 {
40645 struct coda_inode_info *cii = ITOC(inode);
40646- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40647+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40648 }
40649
40650 /* remove all acl caches */
40651 void coda_cache_clear_all(struct super_block *sb)
40652 {
40653- atomic_inc(&permission_epoch);
40654+ atomic_inc_unchecked(&permission_epoch);
40655 }
40656
40657
40658@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40659
40660 hit = (mask & cii->c_cached_perm) == mask &&
40661 cii->c_uid == current_fsuid() &&
40662- cii->c_cached_epoch == atomic_read(&permission_epoch);
40663+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40664
40665 return hit;
40666 }
40667diff -urNp linux-2.6.32.45/fs/compat_binfmt_elf.c linux-2.6.32.45/fs/compat_binfmt_elf.c
40668--- linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40669+++ linux-2.6.32.45/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40670@@ -29,10 +29,12 @@
40671 #undef elfhdr
40672 #undef elf_phdr
40673 #undef elf_note
40674+#undef elf_dyn
40675 #undef elf_addr_t
40676 #define elfhdr elf32_hdr
40677 #define elf_phdr elf32_phdr
40678 #define elf_note elf32_note
40679+#define elf_dyn Elf32_Dyn
40680 #define elf_addr_t Elf32_Addr
40681
40682 /*
40683diff -urNp linux-2.6.32.45/fs/compat.c linux-2.6.32.45/fs/compat.c
40684--- linux-2.6.32.45/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
40685+++ linux-2.6.32.45/fs/compat.c 2011-08-11 19:56:56.000000000 -0400
40686@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
40687
40688 struct compat_readdir_callback {
40689 struct compat_old_linux_dirent __user *dirent;
40690+ struct file * file;
40691 int result;
40692 };
40693
40694@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
40695 buf->result = -EOVERFLOW;
40696 return -EOVERFLOW;
40697 }
40698+
40699+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40700+ return 0;
40701+
40702 buf->result++;
40703 dirent = buf->dirent;
40704 if (!access_ok(VERIFY_WRITE, dirent,
40705@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
40706
40707 buf.result = 0;
40708 buf.dirent = dirent;
40709+ buf.file = file;
40710
40711 error = vfs_readdir(file, compat_fillonedir, &buf);
40712 if (buf.result)
40713@@ -899,6 +905,7 @@ struct compat_linux_dirent {
40714 struct compat_getdents_callback {
40715 struct compat_linux_dirent __user *current_dir;
40716 struct compat_linux_dirent __user *previous;
40717+ struct file * file;
40718 int count;
40719 int error;
40720 };
40721@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
40722 buf->error = -EOVERFLOW;
40723 return -EOVERFLOW;
40724 }
40725+
40726+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40727+ return 0;
40728+
40729 dirent = buf->previous;
40730 if (dirent) {
40731 if (__put_user(offset, &dirent->d_off))
40732@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
40733 buf.previous = NULL;
40734 buf.count = count;
40735 buf.error = 0;
40736+ buf.file = file;
40737
40738 error = vfs_readdir(file, compat_filldir, &buf);
40739 if (error >= 0)
40740@@ -987,6 +999,7 @@ out:
40741 struct compat_getdents_callback64 {
40742 struct linux_dirent64 __user *current_dir;
40743 struct linux_dirent64 __user *previous;
40744+ struct file * file;
40745 int count;
40746 int error;
40747 };
40748@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
40749 buf->error = -EINVAL; /* only used if we fail.. */
40750 if (reclen > buf->count)
40751 return -EINVAL;
40752+
40753+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40754+ return 0;
40755+
40756 dirent = buf->previous;
40757
40758 if (dirent) {
40759@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
40760 buf.previous = NULL;
40761 buf.count = count;
40762 buf.error = 0;
40763+ buf.file = file;
40764
40765 error = vfs_readdir(file, compat_filldir64, &buf);
40766 if (error >= 0)
40767@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
40768 * verify all the pointers
40769 */
40770 ret = -EINVAL;
40771- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
40772+ if (nr_segs > UIO_MAXIOV)
40773 goto out;
40774 if (!file->f_op)
40775 goto out;
40776@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
40777 compat_uptr_t __user *envp,
40778 struct pt_regs * regs)
40779 {
40780+#ifdef CONFIG_GRKERNSEC
40781+ struct file *old_exec_file;
40782+ struct acl_subject_label *old_acl;
40783+ struct rlimit old_rlim[RLIM_NLIMITS];
40784+#endif
40785 struct linux_binprm *bprm;
40786 struct file *file;
40787 struct files_struct *displaced;
40788 bool clear_in_exec;
40789 int retval;
40790+ const struct cred *cred = current_cred();
40791+
40792+ /*
40793+ * We move the actual failure in case of RLIMIT_NPROC excess from
40794+ * set*uid() to execve() because too many poorly written programs
40795+ * don't check setuid() return code. Here we additionally recheck
40796+ * whether NPROC limit is still exceeded.
40797+ */
40798+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
40799+
40800+ if ((current->flags & PF_NPROC_EXCEEDED) &&
40801+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
40802+ retval = -EAGAIN;
40803+ goto out_ret;
40804+ }
40805+
40806+ /* We're below the limit (still or again), so we don't want to make
40807+ * further execve() calls fail. */
40808+ current->flags &= ~PF_NPROC_EXCEEDED;
40809
40810 retval = unshare_files(&displaced);
40811 if (retval)
40812@@ -1499,6 +1541,15 @@ int compat_do_execve(char * filename,
40813 bprm->filename = filename;
40814 bprm->interp = filename;
40815
40816+ if (gr_process_user_ban()) {
40817+ retval = -EPERM;
40818+ goto out_file;
40819+ }
40820+
40821+ retval = -EACCES;
40822+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
40823+ goto out_file;
40824+
40825 retval = bprm_mm_init(bprm);
40826 if (retval)
40827 goto out_file;
40828@@ -1528,9 +1579,40 @@ int compat_do_execve(char * filename,
40829 if (retval < 0)
40830 goto out;
40831
40832+ if (!gr_tpe_allow(file)) {
40833+ retval = -EACCES;
40834+ goto out;
40835+ }
40836+
40837+ if (gr_check_crash_exec(file)) {
40838+ retval = -EACCES;
40839+ goto out;
40840+ }
40841+
40842+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
40843+
40844+ gr_handle_exec_args_compat(bprm, argv);
40845+
40846+#ifdef CONFIG_GRKERNSEC
40847+ old_acl = current->acl;
40848+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
40849+ old_exec_file = current->exec_file;
40850+ get_file(file);
40851+ current->exec_file = file;
40852+#endif
40853+
40854+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
40855+ bprm->unsafe & LSM_UNSAFE_SHARE);
40856+ if (retval < 0)
40857+ goto out_fail;
40858+
40859 retval = search_binary_handler(bprm, regs);
40860 if (retval < 0)
40861- goto out;
40862+ goto out_fail;
40863+#ifdef CONFIG_GRKERNSEC
40864+ if (old_exec_file)
40865+ fput(old_exec_file);
40866+#endif
40867
40868 /* execve succeeded */
40869 current->fs->in_exec = 0;
40870@@ -1541,6 +1623,14 @@ int compat_do_execve(char * filename,
40871 put_files_struct(displaced);
40872 return retval;
40873
40874+out_fail:
40875+#ifdef CONFIG_GRKERNSEC
40876+ current->acl = old_acl;
40877+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
40878+ fput(current->exec_file);
40879+ current->exec_file = old_exec_file;
40880+#endif
40881+
40882 out:
40883 if (bprm->mm) {
40884 acct_arg_size(bprm, 0);
40885@@ -1711,6 +1801,8 @@ int compat_core_sys_select(int n, compat
40886 struct fdtable *fdt;
40887 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40888
40889+ pax_track_stack();
40890+
40891 if (n < 0)
40892 goto out_nofds;
40893
40894diff -urNp linux-2.6.32.45/fs/compat_ioctl.c linux-2.6.32.45/fs/compat_ioctl.c
40895--- linux-2.6.32.45/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
40896+++ linux-2.6.32.45/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
40897@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
40898 up = (struct compat_video_spu_palette __user *) arg;
40899 err = get_user(palp, &up->palette);
40900 err |= get_user(length, &up->length);
40901+ if (err)
40902+ return -EFAULT;
40903
40904 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
40905 err = put_user(compat_ptr(palp), &up_native->palette);
40906diff -urNp linux-2.6.32.45/fs/configfs/dir.c linux-2.6.32.45/fs/configfs/dir.c
40907--- linux-2.6.32.45/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40908+++ linux-2.6.32.45/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
40909@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
40910 }
40911 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
40912 struct configfs_dirent *next;
40913- const char * name;
40914+ const unsigned char * name;
40915+ char d_name[sizeof(next->s_dentry->d_iname)];
40916 int len;
40917
40918 next = list_entry(p, struct configfs_dirent,
40919@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
40920 continue;
40921
40922 name = configfs_get_name(next);
40923- len = strlen(name);
40924+ if (next->s_dentry && name == next->s_dentry->d_iname) {
40925+ len = next->s_dentry->d_name.len;
40926+ memcpy(d_name, name, len);
40927+ name = d_name;
40928+ } else
40929+ len = strlen(name);
40930 if (next->s_dentry)
40931 ino = next->s_dentry->d_inode->i_ino;
40932 else
40933diff -urNp linux-2.6.32.45/fs/dcache.c linux-2.6.32.45/fs/dcache.c
40934--- linux-2.6.32.45/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
40935+++ linux-2.6.32.45/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
40936@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
40937
40938 static struct kmem_cache *dentry_cache __read_mostly;
40939
40940-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
40941-
40942 /*
40943 * This is the single most critical data structure when it comes
40944 * to the dcache: the hashtable for lookups. Somebody should try
40945@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
40946 mempages -= reserve;
40947
40948 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
40949- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
40950+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
40951
40952 dcache_init();
40953 inode_init();
40954diff -urNp linux-2.6.32.45/fs/dlm/lockspace.c linux-2.6.32.45/fs/dlm/lockspace.c
40955--- linux-2.6.32.45/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
40956+++ linux-2.6.32.45/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
40957@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
40958 kfree(ls);
40959 }
40960
40961-static struct sysfs_ops dlm_attr_ops = {
40962+static const struct sysfs_ops dlm_attr_ops = {
40963 .show = dlm_attr_show,
40964 .store = dlm_attr_store,
40965 };
40966diff -urNp linux-2.6.32.45/fs/ecryptfs/inode.c linux-2.6.32.45/fs/ecryptfs/inode.c
40967--- linux-2.6.32.45/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40968+++ linux-2.6.32.45/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40969@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
40970 old_fs = get_fs();
40971 set_fs(get_ds());
40972 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
40973- (char __user *)lower_buf,
40974+ (__force char __user *)lower_buf,
40975 lower_bufsiz);
40976 set_fs(old_fs);
40977 if (rc < 0)
40978@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
40979 }
40980 old_fs = get_fs();
40981 set_fs(get_ds());
40982- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
40983+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
40984 set_fs(old_fs);
40985 if (rc < 0)
40986 goto out_free;
40987diff -urNp linux-2.6.32.45/fs/exec.c linux-2.6.32.45/fs/exec.c
40988--- linux-2.6.32.45/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
40989+++ linux-2.6.32.45/fs/exec.c 2011-08-11 19:56:19.000000000 -0400
40990@@ -56,12 +56,24 @@
40991 #include <linux/fsnotify.h>
40992 #include <linux/fs_struct.h>
40993 #include <linux/pipe_fs_i.h>
40994+#include <linux/random.h>
40995+#include <linux/seq_file.h>
40996+
40997+#ifdef CONFIG_PAX_REFCOUNT
40998+#include <linux/kallsyms.h>
40999+#include <linux/kdebug.h>
41000+#endif
41001
41002 #include <asm/uaccess.h>
41003 #include <asm/mmu_context.h>
41004 #include <asm/tlb.h>
41005 #include "internal.h"
41006
41007+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41008+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41009+EXPORT_SYMBOL(pax_set_initial_flags_func);
41010+#endif
41011+
41012 int core_uses_pid;
41013 char core_pattern[CORENAME_MAX_SIZE] = "core";
41014 unsigned int core_pipe_limit;
41015@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
41016 goto out;
41017
41018 file = do_filp_open(AT_FDCWD, tmp,
41019- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41020+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41021 MAY_READ | MAY_EXEC | MAY_OPEN);
41022 putname(tmp);
41023 error = PTR_ERR(file);
41024@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
41025 int write)
41026 {
41027 struct page *page;
41028- int ret;
41029
41030-#ifdef CONFIG_STACK_GROWSUP
41031- if (write) {
41032- ret = expand_stack_downwards(bprm->vma, pos);
41033- if (ret < 0)
41034- return NULL;
41035- }
41036-#endif
41037- ret = get_user_pages(current, bprm->mm, pos,
41038- 1, write, 1, &page, NULL);
41039- if (ret <= 0)
41040+ if (0 > expand_stack_downwards(bprm->vma, pos))
41041+ return NULL;
41042+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41043 return NULL;
41044
41045 if (write) {
41046@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
41047 vma->vm_end = STACK_TOP_MAX;
41048 vma->vm_start = vma->vm_end - PAGE_SIZE;
41049 vma->vm_flags = VM_STACK_FLAGS;
41050+
41051+#ifdef CONFIG_PAX_SEGMEXEC
41052+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41053+#endif
41054+
41055 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41056
41057 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
41058@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
41059 mm->stack_vm = mm->total_vm = 1;
41060 up_write(&mm->mmap_sem);
41061 bprm->p = vma->vm_end - sizeof(void *);
41062+
41063+#ifdef CONFIG_PAX_RANDUSTACK
41064+ if (randomize_va_space)
41065+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41066+#endif
41067+
41068 return 0;
41069 err:
41070 up_write(&mm->mmap_sem);
41071@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
41072 int r;
41073 mm_segment_t oldfs = get_fs();
41074 set_fs(KERNEL_DS);
41075- r = copy_strings(argc, (char __user * __user *)argv, bprm);
41076+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
41077 set_fs(oldfs);
41078 return r;
41079 }
41080@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
41081 unsigned long new_end = old_end - shift;
41082 struct mmu_gather *tlb;
41083
41084- BUG_ON(new_start > new_end);
41085+ if (new_start >= new_end || new_start < mmap_min_addr)
41086+ return -ENOMEM;
41087
41088 /*
41089 * ensure there are no vmas between where we want to go
41090@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
41091 if (vma != find_vma(mm, new_start))
41092 return -EFAULT;
41093
41094+#ifdef CONFIG_PAX_SEGMEXEC
41095+ BUG_ON(pax_find_mirror_vma(vma));
41096+#endif
41097+
41098 /*
41099 * cover the whole range: [new_start, old_end)
41100 */
41101@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
41102 stack_top = arch_align_stack(stack_top);
41103 stack_top = PAGE_ALIGN(stack_top);
41104
41105- if (unlikely(stack_top < mmap_min_addr) ||
41106- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41107- return -ENOMEM;
41108-
41109 stack_shift = vma->vm_end - stack_top;
41110
41111 bprm->p -= stack_shift;
41112@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
41113 bprm->exec -= stack_shift;
41114
41115 down_write(&mm->mmap_sem);
41116+
41117+ /* Move stack pages down in memory. */
41118+ if (stack_shift) {
41119+ ret = shift_arg_pages(vma, stack_shift);
41120+ if (ret)
41121+ goto out_unlock;
41122+ }
41123+
41124 vm_flags = VM_STACK_FLAGS;
41125
41126 /*
41127@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
41128 vm_flags &= ~VM_EXEC;
41129 vm_flags |= mm->def_flags;
41130
41131+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41132+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41133+ vm_flags &= ~VM_EXEC;
41134+
41135+#ifdef CONFIG_PAX_MPROTECT
41136+ if (mm->pax_flags & MF_PAX_MPROTECT)
41137+ vm_flags &= ~VM_MAYEXEC;
41138+#endif
41139+
41140+ }
41141+#endif
41142+
41143 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
41144 vm_flags);
41145 if (ret)
41146 goto out_unlock;
41147 BUG_ON(prev != vma);
41148
41149- /* Move stack pages down in memory. */
41150- if (stack_shift) {
41151- ret = shift_arg_pages(vma, stack_shift);
41152- if (ret)
41153- goto out_unlock;
41154- }
41155-
41156 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
41157 stack_size = vma->vm_end - vma->vm_start;
41158 /*
41159@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
41160 int err;
41161
41162 file = do_filp_open(AT_FDCWD, name,
41163- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41164+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41165 MAY_EXEC | MAY_OPEN);
41166 if (IS_ERR(file))
41167 goto out;
41168@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
41169 old_fs = get_fs();
41170 set_fs(get_ds());
41171 /* The cast to a user pointer is valid due to the set_fs() */
41172- result = vfs_read(file, (void __user *)addr, count, &pos);
41173+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
41174 set_fs(old_fs);
41175 return result;
41176 }
41177@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
41178 }
41179 rcu_read_unlock();
41180
41181- if (p->fs->users > n_fs) {
41182+ if (atomic_read(&p->fs->users) > n_fs) {
41183 bprm->unsafe |= LSM_UNSAFE_SHARE;
41184 } else {
41185 res = -EAGAIN;
41186@@ -1347,11 +1376,35 @@ int do_execve(char * filename,
41187 char __user *__user *envp,
41188 struct pt_regs * regs)
41189 {
41190+#ifdef CONFIG_GRKERNSEC
41191+ struct file *old_exec_file;
41192+ struct acl_subject_label *old_acl;
41193+ struct rlimit old_rlim[RLIM_NLIMITS];
41194+#endif
41195 struct linux_binprm *bprm;
41196 struct file *file;
41197 struct files_struct *displaced;
41198 bool clear_in_exec;
41199 int retval;
41200+ const struct cred *cred = current_cred();
41201+
41202+ /*
41203+ * We move the actual failure in case of RLIMIT_NPROC excess from
41204+ * set*uid() to execve() because too many poorly written programs
41205+ * don't check setuid() return code. Here we additionally recheck
41206+ * whether NPROC limit is still exceeded.
41207+ */
41208+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41209+
41210+ if ((current->flags & PF_NPROC_EXCEEDED) &&
41211+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
41212+ retval = -EAGAIN;
41213+ goto out_ret;
41214+ }
41215+
41216+ /* We're below the limit (still or again), so we don't want to make
41217+ * further execve() calls fail. */
41218+ current->flags &= ~PF_NPROC_EXCEEDED;
41219
41220 retval = unshare_files(&displaced);
41221 if (retval)
41222@@ -1383,6 +1436,16 @@ int do_execve(char * filename,
41223 bprm->filename = filename;
41224 bprm->interp = filename;
41225
41226+ if (gr_process_user_ban()) {
41227+ retval = -EPERM;
41228+ goto out_file;
41229+ }
41230+
41231+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41232+ retval = -EACCES;
41233+ goto out_file;
41234+ }
41235+
41236 retval = bprm_mm_init(bprm);
41237 if (retval)
41238 goto out_file;
41239@@ -1412,10 +1475,41 @@ int do_execve(char * filename,
41240 if (retval < 0)
41241 goto out;
41242
41243+ if (!gr_tpe_allow(file)) {
41244+ retval = -EACCES;
41245+ goto out;
41246+ }
41247+
41248+ if (gr_check_crash_exec(file)) {
41249+ retval = -EACCES;
41250+ goto out;
41251+ }
41252+
41253+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41254+
41255+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41256+
41257+#ifdef CONFIG_GRKERNSEC
41258+ old_acl = current->acl;
41259+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41260+ old_exec_file = current->exec_file;
41261+ get_file(file);
41262+ current->exec_file = file;
41263+#endif
41264+
41265+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41266+ bprm->unsafe & LSM_UNSAFE_SHARE);
41267+ if (retval < 0)
41268+ goto out_fail;
41269+
41270 current->flags &= ~PF_KTHREAD;
41271 retval = search_binary_handler(bprm,regs);
41272 if (retval < 0)
41273- goto out;
41274+ goto out_fail;
41275+#ifdef CONFIG_GRKERNSEC
41276+ if (old_exec_file)
41277+ fput(old_exec_file);
41278+#endif
41279
41280 /* execve succeeded */
41281 current->fs->in_exec = 0;
41282@@ -1426,6 +1520,14 @@ int do_execve(char * filename,
41283 put_files_struct(displaced);
41284 return retval;
41285
41286+out_fail:
41287+#ifdef CONFIG_GRKERNSEC
41288+ current->acl = old_acl;
41289+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41290+ fput(current->exec_file);
41291+ current->exec_file = old_exec_file;
41292+#endif
41293+
41294 out:
41295 if (bprm->mm) {
41296 acct_arg_size(bprm, 0);
41297@@ -1591,6 +1693,220 @@ out:
41298 return ispipe;
41299 }
41300
41301+int pax_check_flags(unsigned long *flags)
41302+{
41303+ int retval = 0;
41304+
41305+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41306+ if (*flags & MF_PAX_SEGMEXEC)
41307+ {
41308+ *flags &= ~MF_PAX_SEGMEXEC;
41309+ retval = -EINVAL;
41310+ }
41311+#endif
41312+
41313+ if ((*flags & MF_PAX_PAGEEXEC)
41314+
41315+#ifdef CONFIG_PAX_PAGEEXEC
41316+ && (*flags & MF_PAX_SEGMEXEC)
41317+#endif
41318+
41319+ )
41320+ {
41321+ *flags &= ~MF_PAX_PAGEEXEC;
41322+ retval = -EINVAL;
41323+ }
41324+
41325+ if ((*flags & MF_PAX_MPROTECT)
41326+
41327+#ifdef CONFIG_PAX_MPROTECT
41328+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41329+#endif
41330+
41331+ )
41332+ {
41333+ *flags &= ~MF_PAX_MPROTECT;
41334+ retval = -EINVAL;
41335+ }
41336+
41337+ if ((*flags & MF_PAX_EMUTRAMP)
41338+
41339+#ifdef CONFIG_PAX_EMUTRAMP
41340+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41341+#endif
41342+
41343+ )
41344+ {
41345+ *flags &= ~MF_PAX_EMUTRAMP;
41346+ retval = -EINVAL;
41347+ }
41348+
41349+ return retval;
41350+}
41351+
41352+EXPORT_SYMBOL(pax_check_flags);
41353+
41354+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41355+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41356+{
41357+ struct task_struct *tsk = current;
41358+ struct mm_struct *mm = current->mm;
41359+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41360+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41361+ char *path_exec = NULL;
41362+ char *path_fault = NULL;
41363+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41364+
41365+ if (buffer_exec && buffer_fault) {
41366+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41367+
41368+ down_read(&mm->mmap_sem);
41369+ vma = mm->mmap;
41370+ while (vma && (!vma_exec || !vma_fault)) {
41371+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41372+ vma_exec = vma;
41373+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41374+ vma_fault = vma;
41375+ vma = vma->vm_next;
41376+ }
41377+ if (vma_exec) {
41378+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41379+ if (IS_ERR(path_exec))
41380+ path_exec = "<path too long>";
41381+ else {
41382+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41383+ if (path_exec) {
41384+ *path_exec = 0;
41385+ path_exec = buffer_exec;
41386+ } else
41387+ path_exec = "<path too long>";
41388+ }
41389+ }
41390+ if (vma_fault) {
41391+ start = vma_fault->vm_start;
41392+ end = vma_fault->vm_end;
41393+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41394+ if (vma_fault->vm_file) {
41395+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41396+ if (IS_ERR(path_fault))
41397+ path_fault = "<path too long>";
41398+ else {
41399+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41400+ if (path_fault) {
41401+ *path_fault = 0;
41402+ path_fault = buffer_fault;
41403+ } else
41404+ path_fault = "<path too long>";
41405+ }
41406+ } else
41407+ path_fault = "<anonymous mapping>";
41408+ }
41409+ up_read(&mm->mmap_sem);
41410+ }
41411+ if (tsk->signal->curr_ip)
41412+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41413+ else
41414+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41415+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41416+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41417+ task_uid(tsk), task_euid(tsk), pc, sp);
41418+ free_page((unsigned long)buffer_exec);
41419+ free_page((unsigned long)buffer_fault);
41420+ pax_report_insns(pc, sp);
41421+ do_coredump(SIGKILL, SIGKILL, regs);
41422+}
41423+#endif
41424+
41425+#ifdef CONFIG_PAX_REFCOUNT
41426+void pax_report_refcount_overflow(struct pt_regs *regs)
41427+{
41428+ if (current->signal->curr_ip)
41429+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41430+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41431+ else
41432+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41433+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41434+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41435+ show_regs(regs);
41436+ force_sig_specific(SIGKILL, current);
41437+}
41438+#endif
41439+
41440+#ifdef CONFIG_PAX_USERCOPY
41441+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41442+int object_is_on_stack(const void *obj, unsigned long len)
41443+{
41444+ const void * const stack = task_stack_page(current);
41445+ const void * const stackend = stack + THREAD_SIZE;
41446+
41447+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41448+ const void *frame = NULL;
41449+ const void *oldframe;
41450+#endif
41451+
41452+ if (obj + len < obj)
41453+ return -1;
41454+
41455+ if (obj + len <= stack || stackend <= obj)
41456+ return 0;
41457+
41458+ if (obj < stack || stackend < obj + len)
41459+ return -1;
41460+
41461+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41462+ oldframe = __builtin_frame_address(1);
41463+ if (oldframe)
41464+ frame = __builtin_frame_address(2);
41465+ /*
41466+ low ----------------------------------------------> high
41467+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41468+ ^----------------^
41469+ allow copies only within here
41470+ */
41471+ while (stack <= frame && frame < stackend) {
41472+ /* if obj + len extends past the last frame, this
41473+ check won't pass and the next frame will be 0,
41474+ causing us to bail out and correctly report
41475+ the copy as invalid
41476+ */
41477+ if (obj + len <= frame)
41478+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41479+ oldframe = frame;
41480+ frame = *(const void * const *)frame;
41481+ }
41482+ return -1;
41483+#else
41484+ return 1;
41485+#endif
41486+}
41487+
41488+
41489+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41490+{
41491+ if (current->signal->curr_ip)
41492+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41493+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41494+ else
41495+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41496+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41497+
41498+ dump_stack();
41499+ gr_handle_kernel_exploit();
41500+ do_group_exit(SIGKILL);
41501+}
41502+#endif
41503+
41504+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41505+void pax_track_stack(void)
41506+{
41507+ unsigned long sp = (unsigned long)&sp;
41508+ if (sp < current_thread_info()->lowest_stack &&
41509+ sp > (unsigned long)task_stack_page(current))
41510+ current_thread_info()->lowest_stack = sp;
41511+}
41512+EXPORT_SYMBOL(pax_track_stack);
41513+#endif
41514+
41515 static int zap_process(struct task_struct *start)
41516 {
41517 struct task_struct *t;
41518@@ -1793,17 +2109,17 @@ static void wait_for_dump_helpers(struct
41519 pipe = file->f_path.dentry->d_inode->i_pipe;
41520
41521 pipe_lock(pipe);
41522- pipe->readers++;
41523- pipe->writers--;
41524+ atomic_inc(&pipe->readers);
41525+ atomic_dec(&pipe->writers);
41526
41527- while ((pipe->readers > 1) && (!signal_pending(current))) {
41528+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41529 wake_up_interruptible_sync(&pipe->wait);
41530 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41531 pipe_wait(pipe);
41532 }
41533
41534- pipe->readers--;
41535- pipe->writers++;
41536+ atomic_dec(&pipe->readers);
41537+ atomic_inc(&pipe->writers);
41538 pipe_unlock(pipe);
41539
41540 }
41541@@ -1826,10 +2142,13 @@ void do_coredump(long signr, int exit_co
41542 char **helper_argv = NULL;
41543 int helper_argc = 0;
41544 int dump_count = 0;
41545- static atomic_t core_dump_count = ATOMIC_INIT(0);
41546+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41547
41548 audit_core_dumps(signr);
41549
41550+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41551+ gr_handle_brute_attach(current, mm->flags);
41552+
41553 binfmt = mm->binfmt;
41554 if (!binfmt || !binfmt->core_dump)
41555 goto fail;
41556@@ -1874,6 +2193,8 @@ void do_coredump(long signr, int exit_co
41557 */
41558 clear_thread_flag(TIF_SIGPENDING);
41559
41560+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41561+
41562 /*
41563 * lock_kernel() because format_corename() is controlled by sysctl, which
41564 * uses lock_kernel()
41565@@ -1908,7 +2229,7 @@ void do_coredump(long signr, int exit_co
41566 goto fail_unlock;
41567 }
41568
41569- dump_count = atomic_inc_return(&core_dump_count);
41570+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41571 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41572 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41573 task_tgid_vnr(current), current->comm);
41574@@ -1972,7 +2293,7 @@ close_fail:
41575 filp_close(file, NULL);
41576 fail_dropcount:
41577 if (dump_count)
41578- atomic_dec(&core_dump_count);
41579+ atomic_dec_unchecked(&core_dump_count);
41580 fail_unlock:
41581 if (helper_argv)
41582 argv_free(helper_argv);
41583diff -urNp linux-2.6.32.45/fs/ext2/balloc.c linux-2.6.32.45/fs/ext2/balloc.c
41584--- linux-2.6.32.45/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41585+++ linux-2.6.32.45/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41586@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41587
41588 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41589 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41590- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41591+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41592 sbi->s_resuid != current_fsuid() &&
41593 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41594 return 0;
41595diff -urNp linux-2.6.32.45/fs/ext3/balloc.c linux-2.6.32.45/fs/ext3/balloc.c
41596--- linux-2.6.32.45/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41597+++ linux-2.6.32.45/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41598@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41599
41600 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41601 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41602- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41603+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41604 sbi->s_resuid != current_fsuid() &&
41605 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41606 return 0;
41607diff -urNp linux-2.6.32.45/fs/ext4/balloc.c linux-2.6.32.45/fs/ext4/balloc.c
41608--- linux-2.6.32.45/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41609+++ linux-2.6.32.45/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41610@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41611 /* Hm, nope. Are (enough) root reserved blocks available? */
41612 if (sbi->s_resuid == current_fsuid() ||
41613 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41614- capable(CAP_SYS_RESOURCE)) {
41615+ capable_nolog(CAP_SYS_RESOURCE)) {
41616 if (free_blocks >= (nblocks + dirty_blocks))
41617 return 1;
41618 }
41619diff -urNp linux-2.6.32.45/fs/ext4/ext4.h linux-2.6.32.45/fs/ext4/ext4.h
41620--- linux-2.6.32.45/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41621+++ linux-2.6.32.45/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41622@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41623
41624 /* stats for buddy allocator */
41625 spinlock_t s_mb_pa_lock;
41626- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41627- atomic_t s_bal_success; /* we found long enough chunks */
41628- atomic_t s_bal_allocated; /* in blocks */
41629- atomic_t s_bal_ex_scanned; /* total extents scanned */
41630- atomic_t s_bal_goals; /* goal hits */
41631- atomic_t s_bal_breaks; /* too long searches */
41632- atomic_t s_bal_2orders; /* 2^order hits */
41633+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41634+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41635+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41636+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41637+ atomic_unchecked_t s_bal_goals; /* goal hits */
41638+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41639+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41640 spinlock_t s_bal_lock;
41641 unsigned long s_mb_buddies_generated;
41642 unsigned long long s_mb_generation_time;
41643- atomic_t s_mb_lost_chunks;
41644- atomic_t s_mb_preallocated;
41645- atomic_t s_mb_discarded;
41646+ atomic_unchecked_t s_mb_lost_chunks;
41647+ atomic_unchecked_t s_mb_preallocated;
41648+ atomic_unchecked_t s_mb_discarded;
41649 atomic_t s_lock_busy;
41650
41651 /* locality groups */
41652diff -urNp linux-2.6.32.45/fs/ext4/mballoc.c linux-2.6.32.45/fs/ext4/mballoc.c
41653--- linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41654+++ linux-2.6.32.45/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41655@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41656 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41657
41658 if (EXT4_SB(sb)->s_mb_stats)
41659- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41660+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41661
41662 break;
41663 }
41664@@ -2131,7 +2131,7 @@ repeat:
41665 ac->ac_status = AC_STATUS_CONTINUE;
41666 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41667 cr = 3;
41668- atomic_inc(&sbi->s_mb_lost_chunks);
41669+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41670 goto repeat;
41671 }
41672 }
41673@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41674 ext4_grpblk_t counters[16];
41675 } sg;
41676
41677+ pax_track_stack();
41678+
41679 group--;
41680 if (group == 0)
41681 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41682@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41683 if (sbi->s_mb_stats) {
41684 printk(KERN_INFO
41685 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41686- atomic_read(&sbi->s_bal_allocated),
41687- atomic_read(&sbi->s_bal_reqs),
41688- atomic_read(&sbi->s_bal_success));
41689+ atomic_read_unchecked(&sbi->s_bal_allocated),
41690+ atomic_read_unchecked(&sbi->s_bal_reqs),
41691+ atomic_read_unchecked(&sbi->s_bal_success));
41692 printk(KERN_INFO
41693 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41694 "%u 2^N hits, %u breaks, %u lost\n",
41695- atomic_read(&sbi->s_bal_ex_scanned),
41696- atomic_read(&sbi->s_bal_goals),
41697- atomic_read(&sbi->s_bal_2orders),
41698- atomic_read(&sbi->s_bal_breaks),
41699- atomic_read(&sbi->s_mb_lost_chunks));
41700+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41701+ atomic_read_unchecked(&sbi->s_bal_goals),
41702+ atomic_read_unchecked(&sbi->s_bal_2orders),
41703+ atomic_read_unchecked(&sbi->s_bal_breaks),
41704+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41705 printk(KERN_INFO
41706 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41707 sbi->s_mb_buddies_generated++,
41708 sbi->s_mb_generation_time);
41709 printk(KERN_INFO
41710 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
41711- atomic_read(&sbi->s_mb_preallocated),
41712- atomic_read(&sbi->s_mb_discarded));
41713+ atomic_read_unchecked(&sbi->s_mb_preallocated),
41714+ atomic_read_unchecked(&sbi->s_mb_discarded));
41715 }
41716
41717 free_percpu(sbi->s_locality_groups);
41718@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
41719 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
41720
41721 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
41722- atomic_inc(&sbi->s_bal_reqs);
41723- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41724+ atomic_inc_unchecked(&sbi->s_bal_reqs);
41725+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
41726 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
41727- atomic_inc(&sbi->s_bal_success);
41728- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
41729+ atomic_inc_unchecked(&sbi->s_bal_success);
41730+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
41731 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
41732 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
41733- atomic_inc(&sbi->s_bal_goals);
41734+ atomic_inc_unchecked(&sbi->s_bal_goals);
41735 if (ac->ac_found > sbi->s_mb_max_to_scan)
41736- atomic_inc(&sbi->s_bal_breaks);
41737+ atomic_inc_unchecked(&sbi->s_bal_breaks);
41738 }
41739
41740 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
41741@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
41742 trace_ext4_mb_new_inode_pa(ac, pa);
41743
41744 ext4_mb_use_inode_pa(ac, pa);
41745- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41746+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41747
41748 ei = EXT4_I(ac->ac_inode);
41749 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41750@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
41751 trace_ext4_mb_new_group_pa(ac, pa);
41752
41753 ext4_mb_use_group_pa(ac, pa);
41754- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41755+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
41756
41757 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
41758 lg = ac->ac_lg;
41759@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
41760 * from the bitmap and continue.
41761 */
41762 }
41763- atomic_add(free, &sbi->s_mb_discarded);
41764+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
41765
41766 return err;
41767 }
41768@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
41769 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
41770 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
41771 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
41772- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41773+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
41774
41775 if (ac) {
41776 ac->ac_sb = sb;
41777diff -urNp linux-2.6.32.45/fs/ext4/super.c linux-2.6.32.45/fs/ext4/super.c
41778--- linux-2.6.32.45/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
41779+++ linux-2.6.32.45/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
41780@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
41781 }
41782
41783
41784-static struct sysfs_ops ext4_attr_ops = {
41785+static const struct sysfs_ops ext4_attr_ops = {
41786 .show = ext4_attr_show,
41787 .store = ext4_attr_store,
41788 };
41789diff -urNp linux-2.6.32.45/fs/fcntl.c linux-2.6.32.45/fs/fcntl.c
41790--- linux-2.6.32.45/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
41791+++ linux-2.6.32.45/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
41792@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
41793 if (err)
41794 return err;
41795
41796+ if (gr_handle_chroot_fowner(pid, type))
41797+ return -ENOENT;
41798+ if (gr_check_protected_task_fowner(pid, type))
41799+ return -EACCES;
41800+
41801 f_modown(filp, pid, type, force);
41802 return 0;
41803 }
41804@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
41805 switch (cmd) {
41806 case F_DUPFD:
41807 case F_DUPFD_CLOEXEC:
41808+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
41809 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41810 break;
41811 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
41812diff -urNp linux-2.6.32.45/fs/fifo.c linux-2.6.32.45/fs/fifo.c
41813--- linux-2.6.32.45/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
41814+++ linux-2.6.32.45/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
41815@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
41816 */
41817 filp->f_op = &read_pipefifo_fops;
41818 pipe->r_counter++;
41819- if (pipe->readers++ == 0)
41820+ if (atomic_inc_return(&pipe->readers) == 1)
41821 wake_up_partner(inode);
41822
41823- if (!pipe->writers) {
41824+ if (!atomic_read(&pipe->writers)) {
41825 if ((filp->f_flags & O_NONBLOCK)) {
41826 /* suppress POLLHUP until we have
41827 * seen a writer */
41828@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
41829 * errno=ENXIO when there is no process reading the FIFO.
41830 */
41831 ret = -ENXIO;
41832- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
41833+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
41834 goto err;
41835
41836 filp->f_op = &write_pipefifo_fops;
41837 pipe->w_counter++;
41838- if (!pipe->writers++)
41839+ if (atomic_inc_return(&pipe->writers) == 1)
41840 wake_up_partner(inode);
41841
41842- if (!pipe->readers) {
41843+ if (!atomic_read(&pipe->readers)) {
41844 wait_for_partner(inode, &pipe->r_counter);
41845 if (signal_pending(current))
41846 goto err_wr;
41847@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
41848 */
41849 filp->f_op = &rdwr_pipefifo_fops;
41850
41851- pipe->readers++;
41852- pipe->writers++;
41853+ atomic_inc(&pipe->readers);
41854+ atomic_inc(&pipe->writers);
41855 pipe->r_counter++;
41856 pipe->w_counter++;
41857- if (pipe->readers == 1 || pipe->writers == 1)
41858+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
41859 wake_up_partner(inode);
41860 break;
41861
41862@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
41863 return 0;
41864
41865 err_rd:
41866- if (!--pipe->readers)
41867+ if (atomic_dec_and_test(&pipe->readers))
41868 wake_up_interruptible(&pipe->wait);
41869 ret = -ERESTARTSYS;
41870 goto err;
41871
41872 err_wr:
41873- if (!--pipe->writers)
41874+ if (atomic_dec_and_test(&pipe->writers))
41875 wake_up_interruptible(&pipe->wait);
41876 ret = -ERESTARTSYS;
41877 goto err;
41878
41879 err:
41880- if (!pipe->readers && !pipe->writers)
41881+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
41882 free_pipe_info(inode);
41883
41884 err_nocleanup:
41885diff -urNp linux-2.6.32.45/fs/file.c linux-2.6.32.45/fs/file.c
41886--- linux-2.6.32.45/fs/file.c 2011-03-27 14:31:47.000000000 -0400
41887+++ linux-2.6.32.45/fs/file.c 2011-04-17 15:56:46.000000000 -0400
41888@@ -14,6 +14,7 @@
41889 #include <linux/slab.h>
41890 #include <linux/vmalloc.h>
41891 #include <linux/file.h>
41892+#include <linux/security.h>
41893 #include <linux/fdtable.h>
41894 #include <linux/bitops.h>
41895 #include <linux/interrupt.h>
41896@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
41897 * N.B. For clone tasks sharing a files structure, this test
41898 * will limit the total number of files that can be opened.
41899 */
41900+
41901+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
41902 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
41903 return -EMFILE;
41904
41905diff -urNp linux-2.6.32.45/fs/filesystems.c linux-2.6.32.45/fs/filesystems.c
41906--- linux-2.6.32.45/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
41907+++ linux-2.6.32.45/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
41908@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
41909 int len = dot ? dot - name : strlen(name);
41910
41911 fs = __get_fs_type(name, len);
41912+
41913+#ifdef CONFIG_GRKERNSEC_MODHARDEN
41914+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
41915+#else
41916 if (!fs && (request_module("%.*s", len, name) == 0))
41917+#endif
41918 fs = __get_fs_type(name, len);
41919
41920 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
41921diff -urNp linux-2.6.32.45/fs/fscache/cookie.c linux-2.6.32.45/fs/fscache/cookie.c
41922--- linux-2.6.32.45/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
41923+++ linux-2.6.32.45/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
41924@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
41925 parent ? (char *) parent->def->name : "<no-parent>",
41926 def->name, netfs_data);
41927
41928- fscache_stat(&fscache_n_acquires);
41929+ fscache_stat_unchecked(&fscache_n_acquires);
41930
41931 /* if there's no parent cookie, then we don't create one here either */
41932 if (!parent) {
41933- fscache_stat(&fscache_n_acquires_null);
41934+ fscache_stat_unchecked(&fscache_n_acquires_null);
41935 _leave(" [no parent]");
41936 return NULL;
41937 }
41938@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
41939 /* allocate and initialise a cookie */
41940 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
41941 if (!cookie) {
41942- fscache_stat(&fscache_n_acquires_oom);
41943+ fscache_stat_unchecked(&fscache_n_acquires_oom);
41944 _leave(" [ENOMEM]");
41945 return NULL;
41946 }
41947@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
41948
41949 switch (cookie->def->type) {
41950 case FSCACHE_COOKIE_TYPE_INDEX:
41951- fscache_stat(&fscache_n_cookie_index);
41952+ fscache_stat_unchecked(&fscache_n_cookie_index);
41953 break;
41954 case FSCACHE_COOKIE_TYPE_DATAFILE:
41955- fscache_stat(&fscache_n_cookie_data);
41956+ fscache_stat_unchecked(&fscache_n_cookie_data);
41957 break;
41958 default:
41959- fscache_stat(&fscache_n_cookie_special);
41960+ fscache_stat_unchecked(&fscache_n_cookie_special);
41961 break;
41962 }
41963
41964@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
41965 if (fscache_acquire_non_index_cookie(cookie) < 0) {
41966 atomic_dec(&parent->n_children);
41967 __fscache_cookie_put(cookie);
41968- fscache_stat(&fscache_n_acquires_nobufs);
41969+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
41970 _leave(" = NULL");
41971 return NULL;
41972 }
41973 }
41974
41975- fscache_stat(&fscache_n_acquires_ok);
41976+ fscache_stat_unchecked(&fscache_n_acquires_ok);
41977 _leave(" = %p", cookie);
41978 return cookie;
41979 }
41980@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
41981 cache = fscache_select_cache_for_object(cookie->parent);
41982 if (!cache) {
41983 up_read(&fscache_addremove_sem);
41984- fscache_stat(&fscache_n_acquires_no_cache);
41985+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
41986 _leave(" = -ENOMEDIUM [no cache]");
41987 return -ENOMEDIUM;
41988 }
41989@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
41990 object = cache->ops->alloc_object(cache, cookie);
41991 fscache_stat_d(&fscache_n_cop_alloc_object);
41992 if (IS_ERR(object)) {
41993- fscache_stat(&fscache_n_object_no_alloc);
41994+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
41995 ret = PTR_ERR(object);
41996 goto error;
41997 }
41998
41999- fscache_stat(&fscache_n_object_alloc);
42000+ fscache_stat_unchecked(&fscache_n_object_alloc);
42001
42002 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42003
42004@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42005 struct fscache_object *object;
42006 struct hlist_node *_p;
42007
42008- fscache_stat(&fscache_n_updates);
42009+ fscache_stat_unchecked(&fscache_n_updates);
42010
42011 if (!cookie) {
42012- fscache_stat(&fscache_n_updates_null);
42013+ fscache_stat_unchecked(&fscache_n_updates_null);
42014 _leave(" [no cookie]");
42015 return;
42016 }
42017@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42018 struct fscache_object *object;
42019 unsigned long event;
42020
42021- fscache_stat(&fscache_n_relinquishes);
42022+ fscache_stat_unchecked(&fscache_n_relinquishes);
42023 if (retire)
42024- fscache_stat(&fscache_n_relinquishes_retire);
42025+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42026
42027 if (!cookie) {
42028- fscache_stat(&fscache_n_relinquishes_null);
42029+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42030 _leave(" [no cookie]");
42031 return;
42032 }
42033@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42034
42035 /* wait for the cookie to finish being instantiated (or to fail) */
42036 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42037- fscache_stat(&fscache_n_relinquishes_waitcrt);
42038+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42039 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42040 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42041 }
42042diff -urNp linux-2.6.32.45/fs/fscache/internal.h linux-2.6.32.45/fs/fscache/internal.h
42043--- linux-2.6.32.45/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
42044+++ linux-2.6.32.45/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
42045@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
42046 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42047 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42048
42049-extern atomic_t fscache_n_op_pend;
42050-extern atomic_t fscache_n_op_run;
42051-extern atomic_t fscache_n_op_enqueue;
42052-extern atomic_t fscache_n_op_deferred_release;
42053-extern atomic_t fscache_n_op_release;
42054-extern atomic_t fscache_n_op_gc;
42055-extern atomic_t fscache_n_op_cancelled;
42056-extern atomic_t fscache_n_op_rejected;
42057-
42058-extern atomic_t fscache_n_attr_changed;
42059-extern atomic_t fscache_n_attr_changed_ok;
42060-extern atomic_t fscache_n_attr_changed_nobufs;
42061-extern atomic_t fscache_n_attr_changed_nomem;
42062-extern atomic_t fscache_n_attr_changed_calls;
42063-
42064-extern atomic_t fscache_n_allocs;
42065-extern atomic_t fscache_n_allocs_ok;
42066-extern atomic_t fscache_n_allocs_wait;
42067-extern atomic_t fscache_n_allocs_nobufs;
42068-extern atomic_t fscache_n_allocs_intr;
42069-extern atomic_t fscache_n_allocs_object_dead;
42070-extern atomic_t fscache_n_alloc_ops;
42071-extern atomic_t fscache_n_alloc_op_waits;
42072-
42073-extern atomic_t fscache_n_retrievals;
42074-extern atomic_t fscache_n_retrievals_ok;
42075-extern atomic_t fscache_n_retrievals_wait;
42076-extern atomic_t fscache_n_retrievals_nodata;
42077-extern atomic_t fscache_n_retrievals_nobufs;
42078-extern atomic_t fscache_n_retrievals_intr;
42079-extern atomic_t fscache_n_retrievals_nomem;
42080-extern atomic_t fscache_n_retrievals_object_dead;
42081-extern atomic_t fscache_n_retrieval_ops;
42082-extern atomic_t fscache_n_retrieval_op_waits;
42083-
42084-extern atomic_t fscache_n_stores;
42085-extern atomic_t fscache_n_stores_ok;
42086-extern atomic_t fscache_n_stores_again;
42087-extern atomic_t fscache_n_stores_nobufs;
42088-extern atomic_t fscache_n_stores_oom;
42089-extern atomic_t fscache_n_store_ops;
42090-extern atomic_t fscache_n_store_calls;
42091-extern atomic_t fscache_n_store_pages;
42092-extern atomic_t fscache_n_store_radix_deletes;
42093-extern atomic_t fscache_n_store_pages_over_limit;
42094-
42095-extern atomic_t fscache_n_store_vmscan_not_storing;
42096-extern atomic_t fscache_n_store_vmscan_gone;
42097-extern atomic_t fscache_n_store_vmscan_busy;
42098-extern atomic_t fscache_n_store_vmscan_cancelled;
42099-
42100-extern atomic_t fscache_n_marks;
42101-extern atomic_t fscache_n_uncaches;
42102-
42103-extern atomic_t fscache_n_acquires;
42104-extern atomic_t fscache_n_acquires_null;
42105-extern atomic_t fscache_n_acquires_no_cache;
42106-extern atomic_t fscache_n_acquires_ok;
42107-extern atomic_t fscache_n_acquires_nobufs;
42108-extern atomic_t fscache_n_acquires_oom;
42109-
42110-extern atomic_t fscache_n_updates;
42111-extern atomic_t fscache_n_updates_null;
42112-extern atomic_t fscache_n_updates_run;
42113-
42114-extern atomic_t fscache_n_relinquishes;
42115-extern atomic_t fscache_n_relinquishes_null;
42116-extern atomic_t fscache_n_relinquishes_waitcrt;
42117-extern atomic_t fscache_n_relinquishes_retire;
42118-
42119-extern atomic_t fscache_n_cookie_index;
42120-extern atomic_t fscache_n_cookie_data;
42121-extern atomic_t fscache_n_cookie_special;
42122-
42123-extern atomic_t fscache_n_object_alloc;
42124-extern atomic_t fscache_n_object_no_alloc;
42125-extern atomic_t fscache_n_object_lookups;
42126-extern atomic_t fscache_n_object_lookups_negative;
42127-extern atomic_t fscache_n_object_lookups_positive;
42128-extern atomic_t fscache_n_object_lookups_timed_out;
42129-extern atomic_t fscache_n_object_created;
42130-extern atomic_t fscache_n_object_avail;
42131-extern atomic_t fscache_n_object_dead;
42132-
42133-extern atomic_t fscache_n_checkaux_none;
42134-extern atomic_t fscache_n_checkaux_okay;
42135-extern atomic_t fscache_n_checkaux_update;
42136-extern atomic_t fscache_n_checkaux_obsolete;
42137+extern atomic_unchecked_t fscache_n_op_pend;
42138+extern atomic_unchecked_t fscache_n_op_run;
42139+extern atomic_unchecked_t fscache_n_op_enqueue;
42140+extern atomic_unchecked_t fscache_n_op_deferred_release;
42141+extern atomic_unchecked_t fscache_n_op_release;
42142+extern atomic_unchecked_t fscache_n_op_gc;
42143+extern atomic_unchecked_t fscache_n_op_cancelled;
42144+extern atomic_unchecked_t fscache_n_op_rejected;
42145+
42146+extern atomic_unchecked_t fscache_n_attr_changed;
42147+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42148+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42149+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42150+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42151+
42152+extern atomic_unchecked_t fscache_n_allocs;
42153+extern atomic_unchecked_t fscache_n_allocs_ok;
42154+extern atomic_unchecked_t fscache_n_allocs_wait;
42155+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42156+extern atomic_unchecked_t fscache_n_allocs_intr;
42157+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42158+extern atomic_unchecked_t fscache_n_alloc_ops;
42159+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42160+
42161+extern atomic_unchecked_t fscache_n_retrievals;
42162+extern atomic_unchecked_t fscache_n_retrievals_ok;
42163+extern atomic_unchecked_t fscache_n_retrievals_wait;
42164+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42165+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42166+extern atomic_unchecked_t fscache_n_retrievals_intr;
42167+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42168+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42169+extern atomic_unchecked_t fscache_n_retrieval_ops;
42170+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42171+
42172+extern atomic_unchecked_t fscache_n_stores;
42173+extern atomic_unchecked_t fscache_n_stores_ok;
42174+extern atomic_unchecked_t fscache_n_stores_again;
42175+extern atomic_unchecked_t fscache_n_stores_nobufs;
42176+extern atomic_unchecked_t fscache_n_stores_oom;
42177+extern atomic_unchecked_t fscache_n_store_ops;
42178+extern atomic_unchecked_t fscache_n_store_calls;
42179+extern atomic_unchecked_t fscache_n_store_pages;
42180+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42181+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42182+
42183+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42184+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42185+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42186+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42187+
42188+extern atomic_unchecked_t fscache_n_marks;
42189+extern atomic_unchecked_t fscache_n_uncaches;
42190+
42191+extern atomic_unchecked_t fscache_n_acquires;
42192+extern atomic_unchecked_t fscache_n_acquires_null;
42193+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42194+extern atomic_unchecked_t fscache_n_acquires_ok;
42195+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42196+extern atomic_unchecked_t fscache_n_acquires_oom;
42197+
42198+extern atomic_unchecked_t fscache_n_updates;
42199+extern atomic_unchecked_t fscache_n_updates_null;
42200+extern atomic_unchecked_t fscache_n_updates_run;
42201+
42202+extern atomic_unchecked_t fscache_n_relinquishes;
42203+extern atomic_unchecked_t fscache_n_relinquishes_null;
42204+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42205+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42206+
42207+extern atomic_unchecked_t fscache_n_cookie_index;
42208+extern atomic_unchecked_t fscache_n_cookie_data;
42209+extern atomic_unchecked_t fscache_n_cookie_special;
42210+
42211+extern atomic_unchecked_t fscache_n_object_alloc;
42212+extern atomic_unchecked_t fscache_n_object_no_alloc;
42213+extern atomic_unchecked_t fscache_n_object_lookups;
42214+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42215+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42216+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42217+extern atomic_unchecked_t fscache_n_object_created;
42218+extern atomic_unchecked_t fscache_n_object_avail;
42219+extern atomic_unchecked_t fscache_n_object_dead;
42220+
42221+extern atomic_unchecked_t fscache_n_checkaux_none;
42222+extern atomic_unchecked_t fscache_n_checkaux_okay;
42223+extern atomic_unchecked_t fscache_n_checkaux_update;
42224+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42225
42226 extern atomic_t fscache_n_cop_alloc_object;
42227 extern atomic_t fscache_n_cop_lookup_object;
42228@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42229 atomic_inc(stat);
42230 }
42231
42232+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42233+{
42234+ atomic_inc_unchecked(stat);
42235+}
42236+
42237 static inline void fscache_stat_d(atomic_t *stat)
42238 {
42239 atomic_dec(stat);
42240@@ -259,6 +264,7 @@ extern const struct file_operations fsca
42241
42242 #define __fscache_stat(stat) (NULL)
42243 #define fscache_stat(stat) do {} while (0)
42244+#define fscache_stat_unchecked(stat) do {} while (0)
42245 #define fscache_stat_d(stat) do {} while (0)
42246 #endif
42247
42248diff -urNp linux-2.6.32.45/fs/fscache/object.c linux-2.6.32.45/fs/fscache/object.c
42249--- linux-2.6.32.45/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42250+++ linux-2.6.32.45/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42251@@ -144,7 +144,7 @@ static void fscache_object_state_machine
42252 /* update the object metadata on disk */
42253 case FSCACHE_OBJECT_UPDATING:
42254 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42255- fscache_stat(&fscache_n_updates_run);
42256+ fscache_stat_unchecked(&fscache_n_updates_run);
42257 fscache_stat(&fscache_n_cop_update_object);
42258 object->cache->ops->update_object(object);
42259 fscache_stat_d(&fscache_n_cop_update_object);
42260@@ -233,7 +233,7 @@ static void fscache_object_state_machine
42261 spin_lock(&object->lock);
42262 object->state = FSCACHE_OBJECT_DEAD;
42263 spin_unlock(&object->lock);
42264- fscache_stat(&fscache_n_object_dead);
42265+ fscache_stat_unchecked(&fscache_n_object_dead);
42266 goto terminal_transit;
42267
42268 /* handle the parent cache of this object being withdrawn from
42269@@ -248,7 +248,7 @@ static void fscache_object_state_machine
42270 spin_lock(&object->lock);
42271 object->state = FSCACHE_OBJECT_DEAD;
42272 spin_unlock(&object->lock);
42273- fscache_stat(&fscache_n_object_dead);
42274+ fscache_stat_unchecked(&fscache_n_object_dead);
42275 goto terminal_transit;
42276
42277 /* complain about the object being woken up once it is
42278@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42279 parent->cookie->def->name, cookie->def->name,
42280 object->cache->tag->name);
42281
42282- fscache_stat(&fscache_n_object_lookups);
42283+ fscache_stat_unchecked(&fscache_n_object_lookups);
42284 fscache_stat(&fscache_n_cop_lookup_object);
42285 ret = object->cache->ops->lookup_object(object);
42286 fscache_stat_d(&fscache_n_cop_lookup_object);
42287@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42288 if (ret == -ETIMEDOUT) {
42289 /* probably stuck behind another object, so move this one to
42290 * the back of the queue */
42291- fscache_stat(&fscache_n_object_lookups_timed_out);
42292+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42293 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42294 }
42295
42296@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42297
42298 spin_lock(&object->lock);
42299 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42300- fscache_stat(&fscache_n_object_lookups_negative);
42301+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42302
42303 /* transit here to allow write requests to begin stacking up
42304 * and read requests to begin returning ENODATA */
42305@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42306 * result, in which case there may be data available */
42307 spin_lock(&object->lock);
42308 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42309- fscache_stat(&fscache_n_object_lookups_positive);
42310+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42311
42312 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42313
42314@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42315 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42316 } else {
42317 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42318- fscache_stat(&fscache_n_object_created);
42319+ fscache_stat_unchecked(&fscache_n_object_created);
42320
42321 object->state = FSCACHE_OBJECT_AVAILABLE;
42322 spin_unlock(&object->lock);
42323@@ -633,7 +633,7 @@ static void fscache_object_available(str
42324 fscache_enqueue_dependents(object);
42325
42326 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42327- fscache_stat(&fscache_n_object_avail);
42328+ fscache_stat_unchecked(&fscache_n_object_avail);
42329
42330 _leave("");
42331 }
42332@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42333 enum fscache_checkaux result;
42334
42335 if (!object->cookie->def->check_aux) {
42336- fscache_stat(&fscache_n_checkaux_none);
42337+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42338 return FSCACHE_CHECKAUX_OKAY;
42339 }
42340
42341@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42342 switch (result) {
42343 /* entry okay as is */
42344 case FSCACHE_CHECKAUX_OKAY:
42345- fscache_stat(&fscache_n_checkaux_okay);
42346+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42347 break;
42348
42349 /* entry requires update */
42350 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42351- fscache_stat(&fscache_n_checkaux_update);
42352+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42353 break;
42354
42355 /* entry requires deletion */
42356 case FSCACHE_CHECKAUX_OBSOLETE:
42357- fscache_stat(&fscache_n_checkaux_obsolete);
42358+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42359 break;
42360
42361 default:
42362diff -urNp linux-2.6.32.45/fs/fscache/operation.c linux-2.6.32.45/fs/fscache/operation.c
42363--- linux-2.6.32.45/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42364+++ linux-2.6.32.45/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42365@@ -16,7 +16,7 @@
42366 #include <linux/seq_file.h>
42367 #include "internal.h"
42368
42369-atomic_t fscache_op_debug_id;
42370+atomic_unchecked_t fscache_op_debug_id;
42371 EXPORT_SYMBOL(fscache_op_debug_id);
42372
42373 /**
42374@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42375 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42376 ASSERTCMP(atomic_read(&op->usage), >, 0);
42377
42378- fscache_stat(&fscache_n_op_enqueue);
42379+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42380 switch (op->flags & FSCACHE_OP_TYPE) {
42381 case FSCACHE_OP_FAST:
42382 _debug("queue fast");
42383@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42384 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42385 if (op->processor)
42386 fscache_enqueue_operation(op);
42387- fscache_stat(&fscache_n_op_run);
42388+ fscache_stat_unchecked(&fscache_n_op_run);
42389 }
42390
42391 /*
42392@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42393 if (object->n_ops > 0) {
42394 atomic_inc(&op->usage);
42395 list_add_tail(&op->pend_link, &object->pending_ops);
42396- fscache_stat(&fscache_n_op_pend);
42397+ fscache_stat_unchecked(&fscache_n_op_pend);
42398 } else if (!list_empty(&object->pending_ops)) {
42399 atomic_inc(&op->usage);
42400 list_add_tail(&op->pend_link, &object->pending_ops);
42401- fscache_stat(&fscache_n_op_pend);
42402+ fscache_stat_unchecked(&fscache_n_op_pend);
42403 fscache_start_operations(object);
42404 } else {
42405 ASSERTCMP(object->n_in_progress, ==, 0);
42406@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42407 object->n_exclusive++; /* reads and writes must wait */
42408 atomic_inc(&op->usage);
42409 list_add_tail(&op->pend_link, &object->pending_ops);
42410- fscache_stat(&fscache_n_op_pend);
42411+ fscache_stat_unchecked(&fscache_n_op_pend);
42412 ret = 0;
42413 } else {
42414 /* not allowed to submit ops in any other state */
42415@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42416 if (object->n_exclusive > 0) {
42417 atomic_inc(&op->usage);
42418 list_add_tail(&op->pend_link, &object->pending_ops);
42419- fscache_stat(&fscache_n_op_pend);
42420+ fscache_stat_unchecked(&fscache_n_op_pend);
42421 } else if (!list_empty(&object->pending_ops)) {
42422 atomic_inc(&op->usage);
42423 list_add_tail(&op->pend_link, &object->pending_ops);
42424- fscache_stat(&fscache_n_op_pend);
42425+ fscache_stat_unchecked(&fscache_n_op_pend);
42426 fscache_start_operations(object);
42427 } else {
42428 ASSERTCMP(object->n_exclusive, ==, 0);
42429@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42430 object->n_ops++;
42431 atomic_inc(&op->usage);
42432 list_add_tail(&op->pend_link, &object->pending_ops);
42433- fscache_stat(&fscache_n_op_pend);
42434+ fscache_stat_unchecked(&fscache_n_op_pend);
42435 ret = 0;
42436 } else if (object->state == FSCACHE_OBJECT_DYING ||
42437 object->state == FSCACHE_OBJECT_LC_DYING ||
42438 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42439- fscache_stat(&fscache_n_op_rejected);
42440+ fscache_stat_unchecked(&fscache_n_op_rejected);
42441 ret = -ENOBUFS;
42442 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42443 fscache_report_unexpected_submission(object, op, ostate);
42444@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42445
42446 ret = -EBUSY;
42447 if (!list_empty(&op->pend_link)) {
42448- fscache_stat(&fscache_n_op_cancelled);
42449+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42450 list_del_init(&op->pend_link);
42451 object->n_ops--;
42452 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42453@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42454 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42455 BUG();
42456
42457- fscache_stat(&fscache_n_op_release);
42458+ fscache_stat_unchecked(&fscache_n_op_release);
42459
42460 if (op->release) {
42461 op->release(op);
42462@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42463 * lock, and defer it otherwise */
42464 if (!spin_trylock(&object->lock)) {
42465 _debug("defer put");
42466- fscache_stat(&fscache_n_op_deferred_release);
42467+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42468
42469 cache = object->cache;
42470 spin_lock(&cache->op_gc_list_lock);
42471@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42472
42473 _debug("GC DEFERRED REL OBJ%x OP%x",
42474 object->debug_id, op->debug_id);
42475- fscache_stat(&fscache_n_op_gc);
42476+ fscache_stat_unchecked(&fscache_n_op_gc);
42477
42478 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42479
42480diff -urNp linux-2.6.32.45/fs/fscache/page.c linux-2.6.32.45/fs/fscache/page.c
42481--- linux-2.6.32.45/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42482+++ linux-2.6.32.45/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42483@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42484 val = radix_tree_lookup(&cookie->stores, page->index);
42485 if (!val) {
42486 rcu_read_unlock();
42487- fscache_stat(&fscache_n_store_vmscan_not_storing);
42488+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42489 __fscache_uncache_page(cookie, page);
42490 return true;
42491 }
42492@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42493 spin_unlock(&cookie->stores_lock);
42494
42495 if (xpage) {
42496- fscache_stat(&fscache_n_store_vmscan_cancelled);
42497- fscache_stat(&fscache_n_store_radix_deletes);
42498+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42499+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42500 ASSERTCMP(xpage, ==, page);
42501 } else {
42502- fscache_stat(&fscache_n_store_vmscan_gone);
42503+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42504 }
42505
42506 wake_up_bit(&cookie->flags, 0);
42507@@ -106,7 +106,7 @@ page_busy:
42508 /* we might want to wait here, but that could deadlock the allocator as
42509 * the slow-work threads writing to the cache may all end up sleeping
42510 * on memory allocation */
42511- fscache_stat(&fscache_n_store_vmscan_busy);
42512+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42513 return false;
42514 }
42515 EXPORT_SYMBOL(__fscache_maybe_release_page);
42516@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42517 FSCACHE_COOKIE_STORING_TAG);
42518 if (!radix_tree_tag_get(&cookie->stores, page->index,
42519 FSCACHE_COOKIE_PENDING_TAG)) {
42520- fscache_stat(&fscache_n_store_radix_deletes);
42521+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42522 xpage = radix_tree_delete(&cookie->stores, page->index);
42523 }
42524 spin_unlock(&cookie->stores_lock);
42525@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42526
42527 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42528
42529- fscache_stat(&fscache_n_attr_changed_calls);
42530+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42531
42532 if (fscache_object_is_active(object)) {
42533 fscache_set_op_state(op, "CallFS");
42534@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42535
42536 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42537
42538- fscache_stat(&fscache_n_attr_changed);
42539+ fscache_stat_unchecked(&fscache_n_attr_changed);
42540
42541 op = kzalloc(sizeof(*op), GFP_KERNEL);
42542 if (!op) {
42543- fscache_stat(&fscache_n_attr_changed_nomem);
42544+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42545 _leave(" = -ENOMEM");
42546 return -ENOMEM;
42547 }
42548@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42549 if (fscache_submit_exclusive_op(object, op) < 0)
42550 goto nobufs;
42551 spin_unlock(&cookie->lock);
42552- fscache_stat(&fscache_n_attr_changed_ok);
42553+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42554 fscache_put_operation(op);
42555 _leave(" = 0");
42556 return 0;
42557@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42558 nobufs:
42559 spin_unlock(&cookie->lock);
42560 kfree(op);
42561- fscache_stat(&fscache_n_attr_changed_nobufs);
42562+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42563 _leave(" = %d", -ENOBUFS);
42564 return -ENOBUFS;
42565 }
42566@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42567 /* allocate a retrieval operation and attempt to submit it */
42568 op = kzalloc(sizeof(*op), GFP_NOIO);
42569 if (!op) {
42570- fscache_stat(&fscache_n_retrievals_nomem);
42571+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42572 return NULL;
42573 }
42574
42575@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42576 return 0;
42577 }
42578
42579- fscache_stat(&fscache_n_retrievals_wait);
42580+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42581
42582 jif = jiffies;
42583 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42584 fscache_wait_bit_interruptible,
42585 TASK_INTERRUPTIBLE) != 0) {
42586- fscache_stat(&fscache_n_retrievals_intr);
42587+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42588 _leave(" = -ERESTARTSYS");
42589 return -ERESTARTSYS;
42590 }
42591@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42592 */
42593 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42594 struct fscache_retrieval *op,
42595- atomic_t *stat_op_waits,
42596- atomic_t *stat_object_dead)
42597+ atomic_unchecked_t *stat_op_waits,
42598+ atomic_unchecked_t *stat_object_dead)
42599 {
42600 int ret;
42601
42602@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42603 goto check_if_dead;
42604
42605 _debug(">>> WT");
42606- fscache_stat(stat_op_waits);
42607+ fscache_stat_unchecked(stat_op_waits);
42608 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42609 fscache_wait_bit_interruptible,
42610 TASK_INTERRUPTIBLE) < 0) {
42611@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42612
42613 check_if_dead:
42614 if (unlikely(fscache_object_is_dead(object))) {
42615- fscache_stat(stat_object_dead);
42616+ fscache_stat_unchecked(stat_object_dead);
42617 return -ENOBUFS;
42618 }
42619 return 0;
42620@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42621
42622 _enter("%p,%p,,,", cookie, page);
42623
42624- fscache_stat(&fscache_n_retrievals);
42625+ fscache_stat_unchecked(&fscache_n_retrievals);
42626
42627 if (hlist_empty(&cookie->backing_objects))
42628 goto nobufs;
42629@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42630 goto nobufs_unlock;
42631 spin_unlock(&cookie->lock);
42632
42633- fscache_stat(&fscache_n_retrieval_ops);
42634+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42635
42636 /* pin the netfs read context in case we need to do the actual netfs
42637 * read because we've encountered a cache read failure */
42638@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42639
42640 error:
42641 if (ret == -ENOMEM)
42642- fscache_stat(&fscache_n_retrievals_nomem);
42643+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42644 else if (ret == -ERESTARTSYS)
42645- fscache_stat(&fscache_n_retrievals_intr);
42646+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42647 else if (ret == -ENODATA)
42648- fscache_stat(&fscache_n_retrievals_nodata);
42649+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42650 else if (ret < 0)
42651- fscache_stat(&fscache_n_retrievals_nobufs);
42652+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42653 else
42654- fscache_stat(&fscache_n_retrievals_ok);
42655+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42656
42657 fscache_put_retrieval(op);
42658 _leave(" = %d", ret);
42659@@ -453,7 +453,7 @@ nobufs_unlock:
42660 spin_unlock(&cookie->lock);
42661 kfree(op);
42662 nobufs:
42663- fscache_stat(&fscache_n_retrievals_nobufs);
42664+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42665 _leave(" = -ENOBUFS");
42666 return -ENOBUFS;
42667 }
42668@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42669
42670 _enter("%p,,%d,,,", cookie, *nr_pages);
42671
42672- fscache_stat(&fscache_n_retrievals);
42673+ fscache_stat_unchecked(&fscache_n_retrievals);
42674
42675 if (hlist_empty(&cookie->backing_objects))
42676 goto nobufs;
42677@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42678 goto nobufs_unlock;
42679 spin_unlock(&cookie->lock);
42680
42681- fscache_stat(&fscache_n_retrieval_ops);
42682+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42683
42684 /* pin the netfs read context in case we need to do the actual netfs
42685 * read because we've encountered a cache read failure */
42686@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42687
42688 error:
42689 if (ret == -ENOMEM)
42690- fscache_stat(&fscache_n_retrievals_nomem);
42691+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42692 else if (ret == -ERESTARTSYS)
42693- fscache_stat(&fscache_n_retrievals_intr);
42694+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42695 else if (ret == -ENODATA)
42696- fscache_stat(&fscache_n_retrievals_nodata);
42697+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42698 else if (ret < 0)
42699- fscache_stat(&fscache_n_retrievals_nobufs);
42700+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42701 else
42702- fscache_stat(&fscache_n_retrievals_ok);
42703+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42704
42705 fscache_put_retrieval(op);
42706 _leave(" = %d", ret);
42707@@ -570,7 +570,7 @@ nobufs_unlock:
42708 spin_unlock(&cookie->lock);
42709 kfree(op);
42710 nobufs:
42711- fscache_stat(&fscache_n_retrievals_nobufs);
42712+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42713 _leave(" = -ENOBUFS");
42714 return -ENOBUFS;
42715 }
42716@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
42717
42718 _enter("%p,%p,,,", cookie, page);
42719
42720- fscache_stat(&fscache_n_allocs);
42721+ fscache_stat_unchecked(&fscache_n_allocs);
42722
42723 if (hlist_empty(&cookie->backing_objects))
42724 goto nobufs;
42725@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
42726 goto nobufs_unlock;
42727 spin_unlock(&cookie->lock);
42728
42729- fscache_stat(&fscache_n_alloc_ops);
42730+ fscache_stat_unchecked(&fscache_n_alloc_ops);
42731
42732 ret = fscache_wait_for_retrieval_activation(
42733 object, op,
42734@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
42735
42736 error:
42737 if (ret == -ERESTARTSYS)
42738- fscache_stat(&fscache_n_allocs_intr);
42739+ fscache_stat_unchecked(&fscache_n_allocs_intr);
42740 else if (ret < 0)
42741- fscache_stat(&fscache_n_allocs_nobufs);
42742+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42743 else
42744- fscache_stat(&fscache_n_allocs_ok);
42745+ fscache_stat_unchecked(&fscache_n_allocs_ok);
42746
42747 fscache_put_retrieval(op);
42748 _leave(" = %d", ret);
42749@@ -651,7 +651,7 @@ nobufs_unlock:
42750 spin_unlock(&cookie->lock);
42751 kfree(op);
42752 nobufs:
42753- fscache_stat(&fscache_n_allocs_nobufs);
42754+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
42755 _leave(" = -ENOBUFS");
42756 return -ENOBUFS;
42757 }
42758@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
42759
42760 spin_lock(&cookie->stores_lock);
42761
42762- fscache_stat(&fscache_n_store_calls);
42763+ fscache_stat_unchecked(&fscache_n_store_calls);
42764
42765 /* find a page to store */
42766 page = NULL;
42767@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
42768 page = results[0];
42769 _debug("gang %d [%lx]", n, page->index);
42770 if (page->index > op->store_limit) {
42771- fscache_stat(&fscache_n_store_pages_over_limit);
42772+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
42773 goto superseded;
42774 }
42775
42776@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
42777
42778 if (page) {
42779 fscache_set_op_state(&op->op, "Store");
42780- fscache_stat(&fscache_n_store_pages);
42781+ fscache_stat_unchecked(&fscache_n_store_pages);
42782 fscache_stat(&fscache_n_cop_write_page);
42783 ret = object->cache->ops->write_page(op, page);
42784 fscache_stat_d(&fscache_n_cop_write_page);
42785@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
42786 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42787 ASSERT(PageFsCache(page));
42788
42789- fscache_stat(&fscache_n_stores);
42790+ fscache_stat_unchecked(&fscache_n_stores);
42791
42792 op = kzalloc(sizeof(*op), GFP_NOIO);
42793 if (!op)
42794@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
42795 spin_unlock(&cookie->stores_lock);
42796 spin_unlock(&object->lock);
42797
42798- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
42799+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
42800 op->store_limit = object->store_limit;
42801
42802 if (fscache_submit_op(object, &op->op) < 0)
42803@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
42804
42805 spin_unlock(&cookie->lock);
42806 radix_tree_preload_end();
42807- fscache_stat(&fscache_n_store_ops);
42808- fscache_stat(&fscache_n_stores_ok);
42809+ fscache_stat_unchecked(&fscache_n_store_ops);
42810+ fscache_stat_unchecked(&fscache_n_stores_ok);
42811
42812 /* the slow work queue now carries its own ref on the object */
42813 fscache_put_operation(&op->op);
42814@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
42815 return 0;
42816
42817 already_queued:
42818- fscache_stat(&fscache_n_stores_again);
42819+ fscache_stat_unchecked(&fscache_n_stores_again);
42820 already_pending:
42821 spin_unlock(&cookie->stores_lock);
42822 spin_unlock(&object->lock);
42823 spin_unlock(&cookie->lock);
42824 radix_tree_preload_end();
42825 kfree(op);
42826- fscache_stat(&fscache_n_stores_ok);
42827+ fscache_stat_unchecked(&fscache_n_stores_ok);
42828 _leave(" = 0");
42829 return 0;
42830
42831@@ -886,14 +886,14 @@ nobufs:
42832 spin_unlock(&cookie->lock);
42833 radix_tree_preload_end();
42834 kfree(op);
42835- fscache_stat(&fscache_n_stores_nobufs);
42836+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
42837 _leave(" = -ENOBUFS");
42838 return -ENOBUFS;
42839
42840 nomem_free:
42841 kfree(op);
42842 nomem:
42843- fscache_stat(&fscache_n_stores_oom);
42844+ fscache_stat_unchecked(&fscache_n_stores_oom);
42845 _leave(" = -ENOMEM");
42846 return -ENOMEM;
42847 }
42848@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
42849 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42850 ASSERTCMP(page, !=, NULL);
42851
42852- fscache_stat(&fscache_n_uncaches);
42853+ fscache_stat_unchecked(&fscache_n_uncaches);
42854
42855 /* cache withdrawal may beat us to it */
42856 if (!PageFsCache(page))
42857@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
42858 unsigned long loop;
42859
42860 #ifdef CONFIG_FSCACHE_STATS
42861- atomic_add(pagevec->nr, &fscache_n_marks);
42862+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
42863 #endif
42864
42865 for (loop = 0; loop < pagevec->nr; loop++) {
42866diff -urNp linux-2.6.32.45/fs/fscache/stats.c linux-2.6.32.45/fs/fscache/stats.c
42867--- linux-2.6.32.45/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
42868+++ linux-2.6.32.45/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
42869@@ -18,95 +18,95 @@
42870 /*
42871 * operation counters
42872 */
42873-atomic_t fscache_n_op_pend;
42874-atomic_t fscache_n_op_run;
42875-atomic_t fscache_n_op_enqueue;
42876-atomic_t fscache_n_op_requeue;
42877-atomic_t fscache_n_op_deferred_release;
42878-atomic_t fscache_n_op_release;
42879-atomic_t fscache_n_op_gc;
42880-atomic_t fscache_n_op_cancelled;
42881-atomic_t fscache_n_op_rejected;
42882-
42883-atomic_t fscache_n_attr_changed;
42884-atomic_t fscache_n_attr_changed_ok;
42885-atomic_t fscache_n_attr_changed_nobufs;
42886-atomic_t fscache_n_attr_changed_nomem;
42887-atomic_t fscache_n_attr_changed_calls;
42888-
42889-atomic_t fscache_n_allocs;
42890-atomic_t fscache_n_allocs_ok;
42891-atomic_t fscache_n_allocs_wait;
42892-atomic_t fscache_n_allocs_nobufs;
42893-atomic_t fscache_n_allocs_intr;
42894-atomic_t fscache_n_allocs_object_dead;
42895-atomic_t fscache_n_alloc_ops;
42896-atomic_t fscache_n_alloc_op_waits;
42897-
42898-atomic_t fscache_n_retrievals;
42899-atomic_t fscache_n_retrievals_ok;
42900-atomic_t fscache_n_retrievals_wait;
42901-atomic_t fscache_n_retrievals_nodata;
42902-atomic_t fscache_n_retrievals_nobufs;
42903-atomic_t fscache_n_retrievals_intr;
42904-atomic_t fscache_n_retrievals_nomem;
42905-atomic_t fscache_n_retrievals_object_dead;
42906-atomic_t fscache_n_retrieval_ops;
42907-atomic_t fscache_n_retrieval_op_waits;
42908-
42909-atomic_t fscache_n_stores;
42910-atomic_t fscache_n_stores_ok;
42911-atomic_t fscache_n_stores_again;
42912-atomic_t fscache_n_stores_nobufs;
42913-atomic_t fscache_n_stores_oom;
42914-atomic_t fscache_n_store_ops;
42915-atomic_t fscache_n_store_calls;
42916-atomic_t fscache_n_store_pages;
42917-atomic_t fscache_n_store_radix_deletes;
42918-atomic_t fscache_n_store_pages_over_limit;
42919-
42920-atomic_t fscache_n_store_vmscan_not_storing;
42921-atomic_t fscache_n_store_vmscan_gone;
42922-atomic_t fscache_n_store_vmscan_busy;
42923-atomic_t fscache_n_store_vmscan_cancelled;
42924-
42925-atomic_t fscache_n_marks;
42926-atomic_t fscache_n_uncaches;
42927-
42928-atomic_t fscache_n_acquires;
42929-atomic_t fscache_n_acquires_null;
42930-atomic_t fscache_n_acquires_no_cache;
42931-atomic_t fscache_n_acquires_ok;
42932-atomic_t fscache_n_acquires_nobufs;
42933-atomic_t fscache_n_acquires_oom;
42934-
42935-atomic_t fscache_n_updates;
42936-atomic_t fscache_n_updates_null;
42937-atomic_t fscache_n_updates_run;
42938-
42939-atomic_t fscache_n_relinquishes;
42940-atomic_t fscache_n_relinquishes_null;
42941-atomic_t fscache_n_relinquishes_waitcrt;
42942-atomic_t fscache_n_relinquishes_retire;
42943-
42944-atomic_t fscache_n_cookie_index;
42945-atomic_t fscache_n_cookie_data;
42946-atomic_t fscache_n_cookie_special;
42947-
42948-atomic_t fscache_n_object_alloc;
42949-atomic_t fscache_n_object_no_alloc;
42950-atomic_t fscache_n_object_lookups;
42951-atomic_t fscache_n_object_lookups_negative;
42952-atomic_t fscache_n_object_lookups_positive;
42953-atomic_t fscache_n_object_lookups_timed_out;
42954-atomic_t fscache_n_object_created;
42955-atomic_t fscache_n_object_avail;
42956-atomic_t fscache_n_object_dead;
42957-
42958-atomic_t fscache_n_checkaux_none;
42959-atomic_t fscache_n_checkaux_okay;
42960-atomic_t fscache_n_checkaux_update;
42961-atomic_t fscache_n_checkaux_obsolete;
42962+atomic_unchecked_t fscache_n_op_pend;
42963+atomic_unchecked_t fscache_n_op_run;
42964+atomic_unchecked_t fscache_n_op_enqueue;
42965+atomic_unchecked_t fscache_n_op_requeue;
42966+atomic_unchecked_t fscache_n_op_deferred_release;
42967+atomic_unchecked_t fscache_n_op_release;
42968+atomic_unchecked_t fscache_n_op_gc;
42969+atomic_unchecked_t fscache_n_op_cancelled;
42970+atomic_unchecked_t fscache_n_op_rejected;
42971+
42972+atomic_unchecked_t fscache_n_attr_changed;
42973+atomic_unchecked_t fscache_n_attr_changed_ok;
42974+atomic_unchecked_t fscache_n_attr_changed_nobufs;
42975+atomic_unchecked_t fscache_n_attr_changed_nomem;
42976+atomic_unchecked_t fscache_n_attr_changed_calls;
42977+
42978+atomic_unchecked_t fscache_n_allocs;
42979+atomic_unchecked_t fscache_n_allocs_ok;
42980+atomic_unchecked_t fscache_n_allocs_wait;
42981+atomic_unchecked_t fscache_n_allocs_nobufs;
42982+atomic_unchecked_t fscache_n_allocs_intr;
42983+atomic_unchecked_t fscache_n_allocs_object_dead;
42984+atomic_unchecked_t fscache_n_alloc_ops;
42985+atomic_unchecked_t fscache_n_alloc_op_waits;
42986+
42987+atomic_unchecked_t fscache_n_retrievals;
42988+atomic_unchecked_t fscache_n_retrievals_ok;
42989+atomic_unchecked_t fscache_n_retrievals_wait;
42990+atomic_unchecked_t fscache_n_retrievals_nodata;
42991+atomic_unchecked_t fscache_n_retrievals_nobufs;
42992+atomic_unchecked_t fscache_n_retrievals_intr;
42993+atomic_unchecked_t fscache_n_retrievals_nomem;
42994+atomic_unchecked_t fscache_n_retrievals_object_dead;
42995+atomic_unchecked_t fscache_n_retrieval_ops;
42996+atomic_unchecked_t fscache_n_retrieval_op_waits;
42997+
42998+atomic_unchecked_t fscache_n_stores;
42999+atomic_unchecked_t fscache_n_stores_ok;
43000+atomic_unchecked_t fscache_n_stores_again;
43001+atomic_unchecked_t fscache_n_stores_nobufs;
43002+atomic_unchecked_t fscache_n_stores_oom;
43003+atomic_unchecked_t fscache_n_store_ops;
43004+atomic_unchecked_t fscache_n_store_calls;
43005+atomic_unchecked_t fscache_n_store_pages;
43006+atomic_unchecked_t fscache_n_store_radix_deletes;
43007+atomic_unchecked_t fscache_n_store_pages_over_limit;
43008+
43009+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43010+atomic_unchecked_t fscache_n_store_vmscan_gone;
43011+atomic_unchecked_t fscache_n_store_vmscan_busy;
43012+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43013+
43014+atomic_unchecked_t fscache_n_marks;
43015+atomic_unchecked_t fscache_n_uncaches;
43016+
43017+atomic_unchecked_t fscache_n_acquires;
43018+atomic_unchecked_t fscache_n_acquires_null;
43019+atomic_unchecked_t fscache_n_acquires_no_cache;
43020+atomic_unchecked_t fscache_n_acquires_ok;
43021+atomic_unchecked_t fscache_n_acquires_nobufs;
43022+atomic_unchecked_t fscache_n_acquires_oom;
43023+
43024+atomic_unchecked_t fscache_n_updates;
43025+atomic_unchecked_t fscache_n_updates_null;
43026+atomic_unchecked_t fscache_n_updates_run;
43027+
43028+atomic_unchecked_t fscache_n_relinquishes;
43029+atomic_unchecked_t fscache_n_relinquishes_null;
43030+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43031+atomic_unchecked_t fscache_n_relinquishes_retire;
43032+
43033+atomic_unchecked_t fscache_n_cookie_index;
43034+atomic_unchecked_t fscache_n_cookie_data;
43035+atomic_unchecked_t fscache_n_cookie_special;
43036+
43037+atomic_unchecked_t fscache_n_object_alloc;
43038+atomic_unchecked_t fscache_n_object_no_alloc;
43039+atomic_unchecked_t fscache_n_object_lookups;
43040+atomic_unchecked_t fscache_n_object_lookups_negative;
43041+atomic_unchecked_t fscache_n_object_lookups_positive;
43042+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43043+atomic_unchecked_t fscache_n_object_created;
43044+atomic_unchecked_t fscache_n_object_avail;
43045+atomic_unchecked_t fscache_n_object_dead;
43046+
43047+atomic_unchecked_t fscache_n_checkaux_none;
43048+atomic_unchecked_t fscache_n_checkaux_okay;
43049+atomic_unchecked_t fscache_n_checkaux_update;
43050+atomic_unchecked_t fscache_n_checkaux_obsolete;
43051
43052 atomic_t fscache_n_cop_alloc_object;
43053 atomic_t fscache_n_cop_lookup_object;
43054@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43055 seq_puts(m, "FS-Cache statistics\n");
43056
43057 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43058- atomic_read(&fscache_n_cookie_index),
43059- atomic_read(&fscache_n_cookie_data),
43060- atomic_read(&fscache_n_cookie_special));
43061+ atomic_read_unchecked(&fscache_n_cookie_index),
43062+ atomic_read_unchecked(&fscache_n_cookie_data),
43063+ atomic_read_unchecked(&fscache_n_cookie_special));
43064
43065 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43066- atomic_read(&fscache_n_object_alloc),
43067- atomic_read(&fscache_n_object_no_alloc),
43068- atomic_read(&fscache_n_object_avail),
43069- atomic_read(&fscache_n_object_dead));
43070+ atomic_read_unchecked(&fscache_n_object_alloc),
43071+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43072+ atomic_read_unchecked(&fscache_n_object_avail),
43073+ atomic_read_unchecked(&fscache_n_object_dead));
43074 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43075- atomic_read(&fscache_n_checkaux_none),
43076- atomic_read(&fscache_n_checkaux_okay),
43077- atomic_read(&fscache_n_checkaux_update),
43078- atomic_read(&fscache_n_checkaux_obsolete));
43079+ atomic_read_unchecked(&fscache_n_checkaux_none),
43080+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43081+ atomic_read_unchecked(&fscache_n_checkaux_update),
43082+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43083
43084 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43085- atomic_read(&fscache_n_marks),
43086- atomic_read(&fscache_n_uncaches));
43087+ atomic_read_unchecked(&fscache_n_marks),
43088+ atomic_read_unchecked(&fscache_n_uncaches));
43089
43090 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43091 " oom=%u\n",
43092- atomic_read(&fscache_n_acquires),
43093- atomic_read(&fscache_n_acquires_null),
43094- atomic_read(&fscache_n_acquires_no_cache),
43095- atomic_read(&fscache_n_acquires_ok),
43096- atomic_read(&fscache_n_acquires_nobufs),
43097- atomic_read(&fscache_n_acquires_oom));
43098+ atomic_read_unchecked(&fscache_n_acquires),
43099+ atomic_read_unchecked(&fscache_n_acquires_null),
43100+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43101+ atomic_read_unchecked(&fscache_n_acquires_ok),
43102+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43103+ atomic_read_unchecked(&fscache_n_acquires_oom));
43104
43105 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43106- atomic_read(&fscache_n_object_lookups),
43107- atomic_read(&fscache_n_object_lookups_negative),
43108- atomic_read(&fscache_n_object_lookups_positive),
43109- atomic_read(&fscache_n_object_lookups_timed_out),
43110- atomic_read(&fscache_n_object_created));
43111+ atomic_read_unchecked(&fscache_n_object_lookups),
43112+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43113+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43114+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
43115+ atomic_read_unchecked(&fscache_n_object_created));
43116
43117 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43118- atomic_read(&fscache_n_updates),
43119- atomic_read(&fscache_n_updates_null),
43120- atomic_read(&fscache_n_updates_run));
43121+ atomic_read_unchecked(&fscache_n_updates),
43122+ atomic_read_unchecked(&fscache_n_updates_null),
43123+ atomic_read_unchecked(&fscache_n_updates_run));
43124
43125 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43126- atomic_read(&fscache_n_relinquishes),
43127- atomic_read(&fscache_n_relinquishes_null),
43128- atomic_read(&fscache_n_relinquishes_waitcrt),
43129- atomic_read(&fscache_n_relinquishes_retire));
43130+ atomic_read_unchecked(&fscache_n_relinquishes),
43131+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43132+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43133+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43134
43135 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43136- atomic_read(&fscache_n_attr_changed),
43137- atomic_read(&fscache_n_attr_changed_ok),
43138- atomic_read(&fscache_n_attr_changed_nobufs),
43139- atomic_read(&fscache_n_attr_changed_nomem),
43140- atomic_read(&fscache_n_attr_changed_calls));
43141+ atomic_read_unchecked(&fscache_n_attr_changed),
43142+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43143+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43144+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43145+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43146
43147 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43148- atomic_read(&fscache_n_allocs),
43149- atomic_read(&fscache_n_allocs_ok),
43150- atomic_read(&fscache_n_allocs_wait),
43151- atomic_read(&fscache_n_allocs_nobufs),
43152- atomic_read(&fscache_n_allocs_intr));
43153+ atomic_read_unchecked(&fscache_n_allocs),
43154+ atomic_read_unchecked(&fscache_n_allocs_ok),
43155+ atomic_read_unchecked(&fscache_n_allocs_wait),
43156+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43157+ atomic_read_unchecked(&fscache_n_allocs_intr));
43158 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43159- atomic_read(&fscache_n_alloc_ops),
43160- atomic_read(&fscache_n_alloc_op_waits),
43161- atomic_read(&fscache_n_allocs_object_dead));
43162+ atomic_read_unchecked(&fscache_n_alloc_ops),
43163+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43164+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43165
43166 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43167 " int=%u oom=%u\n",
43168- atomic_read(&fscache_n_retrievals),
43169- atomic_read(&fscache_n_retrievals_ok),
43170- atomic_read(&fscache_n_retrievals_wait),
43171- atomic_read(&fscache_n_retrievals_nodata),
43172- atomic_read(&fscache_n_retrievals_nobufs),
43173- atomic_read(&fscache_n_retrievals_intr),
43174- atomic_read(&fscache_n_retrievals_nomem));
43175+ atomic_read_unchecked(&fscache_n_retrievals),
43176+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43177+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43178+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43179+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43180+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43181+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43182 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43183- atomic_read(&fscache_n_retrieval_ops),
43184- atomic_read(&fscache_n_retrieval_op_waits),
43185- atomic_read(&fscache_n_retrievals_object_dead));
43186+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43187+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43188+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43189
43190 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43191- atomic_read(&fscache_n_stores),
43192- atomic_read(&fscache_n_stores_ok),
43193- atomic_read(&fscache_n_stores_again),
43194- atomic_read(&fscache_n_stores_nobufs),
43195- atomic_read(&fscache_n_stores_oom));
43196+ atomic_read_unchecked(&fscache_n_stores),
43197+ atomic_read_unchecked(&fscache_n_stores_ok),
43198+ atomic_read_unchecked(&fscache_n_stores_again),
43199+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43200+ atomic_read_unchecked(&fscache_n_stores_oom));
43201 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43202- atomic_read(&fscache_n_store_ops),
43203- atomic_read(&fscache_n_store_calls),
43204- atomic_read(&fscache_n_store_pages),
43205- atomic_read(&fscache_n_store_radix_deletes),
43206- atomic_read(&fscache_n_store_pages_over_limit));
43207+ atomic_read_unchecked(&fscache_n_store_ops),
43208+ atomic_read_unchecked(&fscache_n_store_calls),
43209+ atomic_read_unchecked(&fscache_n_store_pages),
43210+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43211+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43212
43213 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43214- atomic_read(&fscache_n_store_vmscan_not_storing),
43215- atomic_read(&fscache_n_store_vmscan_gone),
43216- atomic_read(&fscache_n_store_vmscan_busy),
43217- atomic_read(&fscache_n_store_vmscan_cancelled));
43218+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43219+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43220+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43221+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43222
43223 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43224- atomic_read(&fscache_n_op_pend),
43225- atomic_read(&fscache_n_op_run),
43226- atomic_read(&fscache_n_op_enqueue),
43227- atomic_read(&fscache_n_op_cancelled),
43228- atomic_read(&fscache_n_op_rejected));
43229+ atomic_read_unchecked(&fscache_n_op_pend),
43230+ atomic_read_unchecked(&fscache_n_op_run),
43231+ atomic_read_unchecked(&fscache_n_op_enqueue),
43232+ atomic_read_unchecked(&fscache_n_op_cancelled),
43233+ atomic_read_unchecked(&fscache_n_op_rejected));
43234 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43235- atomic_read(&fscache_n_op_deferred_release),
43236- atomic_read(&fscache_n_op_release),
43237- atomic_read(&fscache_n_op_gc));
43238+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43239+ atomic_read_unchecked(&fscache_n_op_release),
43240+ atomic_read_unchecked(&fscache_n_op_gc));
43241
43242 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43243 atomic_read(&fscache_n_cop_alloc_object),
43244diff -urNp linux-2.6.32.45/fs/fs_struct.c linux-2.6.32.45/fs/fs_struct.c
43245--- linux-2.6.32.45/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43246+++ linux-2.6.32.45/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43247@@ -4,6 +4,7 @@
43248 #include <linux/path.h>
43249 #include <linux/slab.h>
43250 #include <linux/fs_struct.h>
43251+#include <linux/grsecurity.h>
43252
43253 /*
43254 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43255@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43256 old_root = fs->root;
43257 fs->root = *path;
43258 path_get(path);
43259+ gr_set_chroot_entries(current, path);
43260 write_unlock(&fs->lock);
43261 if (old_root.dentry)
43262 path_put(&old_root);
43263@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43264 && fs->root.mnt == old_root->mnt) {
43265 path_get(new_root);
43266 fs->root = *new_root;
43267+ gr_set_chroot_entries(p, new_root);
43268 count++;
43269 }
43270 if (fs->pwd.dentry == old_root->dentry
43271@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43272 task_lock(tsk);
43273 write_lock(&fs->lock);
43274 tsk->fs = NULL;
43275- kill = !--fs->users;
43276+ gr_clear_chroot_entries(tsk);
43277+ kill = !atomic_dec_return(&fs->users);
43278 write_unlock(&fs->lock);
43279 task_unlock(tsk);
43280 if (kill)
43281@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43282 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43283 /* We don't need to lock fs - think why ;-) */
43284 if (fs) {
43285- fs->users = 1;
43286+ atomic_set(&fs->users, 1);
43287 fs->in_exec = 0;
43288 rwlock_init(&fs->lock);
43289 fs->umask = old->umask;
43290@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43291
43292 task_lock(current);
43293 write_lock(&fs->lock);
43294- kill = !--fs->users;
43295+ kill = !atomic_dec_return(&fs->users);
43296 current->fs = new_fs;
43297+ gr_set_chroot_entries(current, &new_fs->root);
43298 write_unlock(&fs->lock);
43299 task_unlock(current);
43300
43301@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43302
43303 /* to be mentioned only in INIT_TASK */
43304 struct fs_struct init_fs = {
43305- .users = 1,
43306+ .users = ATOMIC_INIT(1),
43307 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43308 .umask = 0022,
43309 };
43310@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43311 task_lock(current);
43312
43313 write_lock(&init_fs.lock);
43314- init_fs.users++;
43315+ atomic_inc(&init_fs.users);
43316 write_unlock(&init_fs.lock);
43317
43318 write_lock(&fs->lock);
43319 current->fs = &init_fs;
43320- kill = !--fs->users;
43321+ gr_set_chroot_entries(current, &current->fs->root);
43322+ kill = !atomic_dec_return(&fs->users);
43323 write_unlock(&fs->lock);
43324
43325 task_unlock(current);
43326diff -urNp linux-2.6.32.45/fs/fuse/cuse.c linux-2.6.32.45/fs/fuse/cuse.c
43327--- linux-2.6.32.45/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43328+++ linux-2.6.32.45/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43329@@ -576,10 +576,12 @@ static int __init cuse_init(void)
43330 INIT_LIST_HEAD(&cuse_conntbl[i]);
43331
43332 /* inherit and extend fuse_dev_operations */
43333- cuse_channel_fops = fuse_dev_operations;
43334- cuse_channel_fops.owner = THIS_MODULE;
43335- cuse_channel_fops.open = cuse_channel_open;
43336- cuse_channel_fops.release = cuse_channel_release;
43337+ pax_open_kernel();
43338+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43339+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43340+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43341+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43342+ pax_close_kernel();
43343
43344 cuse_class = class_create(THIS_MODULE, "cuse");
43345 if (IS_ERR(cuse_class))
43346diff -urNp linux-2.6.32.45/fs/fuse/dev.c linux-2.6.32.45/fs/fuse/dev.c
43347--- linux-2.6.32.45/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43348+++ linux-2.6.32.45/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43349@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43350 {
43351 struct fuse_notify_inval_entry_out outarg;
43352 int err = -EINVAL;
43353- char buf[FUSE_NAME_MAX+1];
43354+ char *buf = NULL;
43355 struct qstr name;
43356
43357 if (size < sizeof(outarg))
43358@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43359 if (outarg.namelen > FUSE_NAME_MAX)
43360 goto err;
43361
43362+ err = -ENOMEM;
43363+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43364+ if (!buf)
43365+ goto err;
43366+
43367 name.name = buf;
43368 name.len = outarg.namelen;
43369 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43370@@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43371
43372 down_read(&fc->killsb);
43373 err = -ENOENT;
43374- if (!fc->sb)
43375- goto err_unlock;
43376-
43377- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43378-
43379-err_unlock:
43380+ if (fc->sb)
43381+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43382 up_read(&fc->killsb);
43383+ kfree(buf);
43384 return err;
43385
43386 err:
43387 fuse_copy_finish(cs);
43388+ kfree(buf);
43389 return err;
43390 }
43391
43392diff -urNp linux-2.6.32.45/fs/fuse/dir.c linux-2.6.32.45/fs/fuse/dir.c
43393--- linux-2.6.32.45/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43394+++ linux-2.6.32.45/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43395@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43396 return link;
43397 }
43398
43399-static void free_link(char *link)
43400+static void free_link(const char *link)
43401 {
43402 if (!IS_ERR(link))
43403 free_page((unsigned long) link);
43404diff -urNp linux-2.6.32.45/fs/gfs2/ops_inode.c linux-2.6.32.45/fs/gfs2/ops_inode.c
43405--- linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43406+++ linux-2.6.32.45/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43407@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43408 unsigned int x;
43409 int error;
43410
43411+ pax_track_stack();
43412+
43413 if (ndentry->d_inode) {
43414 nip = GFS2_I(ndentry->d_inode);
43415 if (ip == nip)
43416diff -urNp linux-2.6.32.45/fs/gfs2/sys.c linux-2.6.32.45/fs/gfs2/sys.c
43417--- linux-2.6.32.45/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43418+++ linux-2.6.32.45/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43419@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43420 return a->store ? a->store(sdp, buf, len) : len;
43421 }
43422
43423-static struct sysfs_ops gfs2_attr_ops = {
43424+static const struct sysfs_ops gfs2_attr_ops = {
43425 .show = gfs2_attr_show,
43426 .store = gfs2_attr_store,
43427 };
43428@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43429 return 0;
43430 }
43431
43432-static struct kset_uevent_ops gfs2_uevent_ops = {
43433+static const struct kset_uevent_ops gfs2_uevent_ops = {
43434 .uevent = gfs2_uevent,
43435 };
43436
43437diff -urNp linux-2.6.32.45/fs/hfsplus/catalog.c linux-2.6.32.45/fs/hfsplus/catalog.c
43438--- linux-2.6.32.45/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43439+++ linux-2.6.32.45/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43440@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43441 int err;
43442 u16 type;
43443
43444+ pax_track_stack();
43445+
43446 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43447 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43448 if (err)
43449@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43450 int entry_size;
43451 int err;
43452
43453+ pax_track_stack();
43454+
43455 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43456 sb = dir->i_sb;
43457 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43458@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43459 int entry_size, type;
43460 int err = 0;
43461
43462+ pax_track_stack();
43463+
43464 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43465 dst_dir->i_ino, dst_name->name);
43466 sb = src_dir->i_sb;
43467diff -urNp linux-2.6.32.45/fs/hfsplus/dir.c linux-2.6.32.45/fs/hfsplus/dir.c
43468--- linux-2.6.32.45/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43469+++ linux-2.6.32.45/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43470@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43471 struct hfsplus_readdir_data *rd;
43472 u16 type;
43473
43474+ pax_track_stack();
43475+
43476 if (filp->f_pos >= inode->i_size)
43477 return 0;
43478
43479diff -urNp linux-2.6.32.45/fs/hfsplus/inode.c linux-2.6.32.45/fs/hfsplus/inode.c
43480--- linux-2.6.32.45/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43481+++ linux-2.6.32.45/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43482@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43483 int res = 0;
43484 u16 type;
43485
43486+ pax_track_stack();
43487+
43488 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43489
43490 HFSPLUS_I(inode).dev = 0;
43491@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43492 struct hfs_find_data fd;
43493 hfsplus_cat_entry entry;
43494
43495+ pax_track_stack();
43496+
43497 if (HFSPLUS_IS_RSRC(inode))
43498 main_inode = HFSPLUS_I(inode).rsrc_inode;
43499
43500diff -urNp linux-2.6.32.45/fs/hfsplus/ioctl.c linux-2.6.32.45/fs/hfsplus/ioctl.c
43501--- linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43502+++ linux-2.6.32.45/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43503@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43504 struct hfsplus_cat_file *file;
43505 int res;
43506
43507+ pax_track_stack();
43508+
43509 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43510 return -EOPNOTSUPP;
43511
43512@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43513 struct hfsplus_cat_file *file;
43514 ssize_t res = 0;
43515
43516+ pax_track_stack();
43517+
43518 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43519 return -EOPNOTSUPP;
43520
43521diff -urNp linux-2.6.32.45/fs/hfsplus/super.c linux-2.6.32.45/fs/hfsplus/super.c
43522--- linux-2.6.32.45/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43523+++ linux-2.6.32.45/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43524@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43525 struct nls_table *nls = NULL;
43526 int err = -EINVAL;
43527
43528+ pax_track_stack();
43529+
43530 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43531 if (!sbi)
43532 return -ENOMEM;
43533diff -urNp linux-2.6.32.45/fs/hugetlbfs/inode.c linux-2.6.32.45/fs/hugetlbfs/inode.c
43534--- linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43535+++ linux-2.6.32.45/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43536@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43537 .kill_sb = kill_litter_super,
43538 };
43539
43540-static struct vfsmount *hugetlbfs_vfsmount;
43541+struct vfsmount *hugetlbfs_vfsmount;
43542
43543 static int can_do_hugetlb_shm(void)
43544 {
43545diff -urNp linux-2.6.32.45/fs/ioctl.c linux-2.6.32.45/fs/ioctl.c
43546--- linux-2.6.32.45/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43547+++ linux-2.6.32.45/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43548@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43549 u64 phys, u64 len, u32 flags)
43550 {
43551 struct fiemap_extent extent;
43552- struct fiemap_extent *dest = fieinfo->fi_extents_start;
43553+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43554
43555 /* only count the extents */
43556 if (fieinfo->fi_extents_max == 0) {
43557@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43558
43559 fieinfo.fi_flags = fiemap.fm_flags;
43560 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43561- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43562+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43563
43564 if (fiemap.fm_extent_count != 0 &&
43565 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43566@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43567 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43568 fiemap.fm_flags = fieinfo.fi_flags;
43569 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43570- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43571+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43572 error = -EFAULT;
43573
43574 return error;
43575diff -urNp linux-2.6.32.45/fs/jbd/checkpoint.c linux-2.6.32.45/fs/jbd/checkpoint.c
43576--- linux-2.6.32.45/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43577+++ linux-2.6.32.45/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43578@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43579 tid_t this_tid;
43580 int result;
43581
43582+ pax_track_stack();
43583+
43584 jbd_debug(1, "Start checkpoint\n");
43585
43586 /*
43587diff -urNp linux-2.6.32.45/fs/jffs2/compr_rtime.c linux-2.6.32.45/fs/jffs2/compr_rtime.c
43588--- linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43589+++ linux-2.6.32.45/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43590@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43591 int outpos = 0;
43592 int pos=0;
43593
43594+ pax_track_stack();
43595+
43596 memset(positions,0,sizeof(positions));
43597
43598 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43599@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43600 int outpos = 0;
43601 int pos=0;
43602
43603+ pax_track_stack();
43604+
43605 memset(positions,0,sizeof(positions));
43606
43607 while (outpos<destlen) {
43608diff -urNp linux-2.6.32.45/fs/jffs2/compr_rubin.c linux-2.6.32.45/fs/jffs2/compr_rubin.c
43609--- linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43610+++ linux-2.6.32.45/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43611@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43612 int ret;
43613 uint32_t mysrclen, mydstlen;
43614
43615+ pax_track_stack();
43616+
43617 mysrclen = *sourcelen;
43618 mydstlen = *dstlen - 8;
43619
43620diff -urNp linux-2.6.32.45/fs/jffs2/erase.c linux-2.6.32.45/fs/jffs2/erase.c
43621--- linux-2.6.32.45/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43622+++ linux-2.6.32.45/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43623@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43624 struct jffs2_unknown_node marker = {
43625 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43626 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43627- .totlen = cpu_to_je32(c->cleanmarker_size)
43628+ .totlen = cpu_to_je32(c->cleanmarker_size),
43629+ .hdr_crc = cpu_to_je32(0)
43630 };
43631
43632 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43633diff -urNp linux-2.6.32.45/fs/jffs2/wbuf.c linux-2.6.32.45/fs/jffs2/wbuf.c
43634--- linux-2.6.32.45/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43635+++ linux-2.6.32.45/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43636@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43637 {
43638 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43639 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43640- .totlen = constant_cpu_to_je32(8)
43641+ .totlen = constant_cpu_to_je32(8),
43642+ .hdr_crc = constant_cpu_to_je32(0)
43643 };
43644
43645 /*
43646diff -urNp linux-2.6.32.45/fs/jffs2/xattr.c linux-2.6.32.45/fs/jffs2/xattr.c
43647--- linux-2.6.32.45/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43648+++ linux-2.6.32.45/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43649@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43650
43651 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43652
43653+ pax_track_stack();
43654+
43655 /* Phase.1 : Merge same xref */
43656 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43657 xref_tmphash[i] = NULL;
43658diff -urNp linux-2.6.32.45/fs/jfs/super.c linux-2.6.32.45/fs/jfs/super.c
43659--- linux-2.6.32.45/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43660+++ linux-2.6.32.45/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43661@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43662
43663 jfs_inode_cachep =
43664 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43665- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43666+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43667 init_once);
43668 if (jfs_inode_cachep == NULL)
43669 return -ENOMEM;
43670diff -urNp linux-2.6.32.45/fs/Kconfig.binfmt linux-2.6.32.45/fs/Kconfig.binfmt
43671--- linux-2.6.32.45/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43672+++ linux-2.6.32.45/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43673@@ -86,7 +86,7 @@ config HAVE_AOUT
43674
43675 config BINFMT_AOUT
43676 tristate "Kernel support for a.out and ECOFF binaries"
43677- depends on HAVE_AOUT
43678+ depends on HAVE_AOUT && BROKEN
43679 ---help---
43680 A.out (Assembler.OUTput) is a set of formats for libraries and
43681 executables used in the earliest versions of UNIX. Linux used
43682diff -urNp linux-2.6.32.45/fs/libfs.c linux-2.6.32.45/fs/libfs.c
43683--- linux-2.6.32.45/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43684+++ linux-2.6.32.45/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43685@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43686
43687 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43688 struct dentry *next;
43689+ char d_name[sizeof(next->d_iname)];
43690+ const unsigned char *name;
43691+
43692 next = list_entry(p, struct dentry, d_u.d_child);
43693 if (d_unhashed(next) || !next->d_inode)
43694 continue;
43695
43696 spin_unlock(&dcache_lock);
43697- if (filldir(dirent, next->d_name.name,
43698+ name = next->d_name.name;
43699+ if (name == next->d_iname) {
43700+ memcpy(d_name, name, next->d_name.len);
43701+ name = d_name;
43702+ }
43703+ if (filldir(dirent, name,
43704 next->d_name.len, filp->f_pos,
43705 next->d_inode->i_ino,
43706 dt_type(next->d_inode)) < 0)
43707diff -urNp linux-2.6.32.45/fs/lockd/clntproc.c linux-2.6.32.45/fs/lockd/clntproc.c
43708--- linux-2.6.32.45/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
43709+++ linux-2.6.32.45/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
43710@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
43711 /*
43712 * Cookie counter for NLM requests
43713 */
43714-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
43715+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
43716
43717 void nlmclnt_next_cookie(struct nlm_cookie *c)
43718 {
43719- u32 cookie = atomic_inc_return(&nlm_cookie);
43720+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
43721
43722 memcpy(c->data, &cookie, 4);
43723 c->len=4;
43724@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
43725 struct nlm_rqst reqst, *req;
43726 int status;
43727
43728+ pax_track_stack();
43729+
43730 req = &reqst;
43731 memset(req, 0, sizeof(*req));
43732 locks_init_lock(&req->a_args.lock.fl);
43733diff -urNp linux-2.6.32.45/fs/lockd/svc.c linux-2.6.32.45/fs/lockd/svc.c
43734--- linux-2.6.32.45/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
43735+++ linux-2.6.32.45/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
43736@@ -43,7 +43,7 @@
43737
43738 static struct svc_program nlmsvc_program;
43739
43740-struct nlmsvc_binding * nlmsvc_ops;
43741+const struct nlmsvc_binding * nlmsvc_ops;
43742 EXPORT_SYMBOL_GPL(nlmsvc_ops);
43743
43744 static DEFINE_MUTEX(nlmsvc_mutex);
43745diff -urNp linux-2.6.32.45/fs/locks.c linux-2.6.32.45/fs/locks.c
43746--- linux-2.6.32.45/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
43747+++ linux-2.6.32.45/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
43748@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
43749
43750 static struct kmem_cache *filelock_cache __read_mostly;
43751
43752+static void locks_init_lock_always(struct file_lock *fl)
43753+{
43754+ fl->fl_next = NULL;
43755+ fl->fl_fasync = NULL;
43756+ fl->fl_owner = NULL;
43757+ fl->fl_pid = 0;
43758+ fl->fl_nspid = NULL;
43759+ fl->fl_file = NULL;
43760+ fl->fl_flags = 0;
43761+ fl->fl_type = 0;
43762+ fl->fl_start = fl->fl_end = 0;
43763+}
43764+
43765 /* Allocate an empty lock structure. */
43766 static struct file_lock *locks_alloc_lock(void)
43767 {
43768- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43769+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
43770+
43771+ if (fl)
43772+ locks_init_lock_always(fl);
43773+
43774+ return fl;
43775 }
43776
43777 void locks_release_private(struct file_lock *fl)
43778@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
43779 INIT_LIST_HEAD(&fl->fl_link);
43780 INIT_LIST_HEAD(&fl->fl_block);
43781 init_waitqueue_head(&fl->fl_wait);
43782- fl->fl_next = NULL;
43783- fl->fl_fasync = NULL;
43784- fl->fl_owner = NULL;
43785- fl->fl_pid = 0;
43786- fl->fl_nspid = NULL;
43787- fl->fl_file = NULL;
43788- fl->fl_flags = 0;
43789- fl->fl_type = 0;
43790- fl->fl_start = fl->fl_end = 0;
43791 fl->fl_ops = NULL;
43792 fl->fl_lmops = NULL;
43793+ locks_init_lock_always(fl);
43794 }
43795
43796 EXPORT_SYMBOL(locks_init_lock);
43797@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
43798 return;
43799
43800 if (filp->f_op && filp->f_op->flock) {
43801- struct file_lock fl = {
43802+ struct file_lock flock = {
43803 .fl_pid = current->tgid,
43804 .fl_file = filp,
43805 .fl_flags = FL_FLOCK,
43806 .fl_type = F_UNLCK,
43807 .fl_end = OFFSET_MAX,
43808 };
43809- filp->f_op->flock(filp, F_SETLKW, &fl);
43810- if (fl.fl_ops && fl.fl_ops->fl_release_private)
43811- fl.fl_ops->fl_release_private(&fl);
43812+ filp->f_op->flock(filp, F_SETLKW, &flock);
43813+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
43814+ flock.fl_ops->fl_release_private(&flock);
43815 }
43816
43817 lock_kernel();
43818diff -urNp linux-2.6.32.45/fs/mbcache.c linux-2.6.32.45/fs/mbcache.c
43819--- linux-2.6.32.45/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
43820+++ linux-2.6.32.45/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
43821@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
43822 if (!cache)
43823 goto fail;
43824 cache->c_name = name;
43825- cache->c_op.free = NULL;
43826+ *(void **)&cache->c_op.free = NULL;
43827 if (cache_op)
43828- cache->c_op.free = cache_op->free;
43829+ *(void **)&cache->c_op.free = cache_op->free;
43830 atomic_set(&cache->c_entry_count, 0);
43831 cache->c_bucket_bits = bucket_bits;
43832 #ifdef MB_CACHE_INDEXES_COUNT
43833diff -urNp linux-2.6.32.45/fs/namei.c linux-2.6.32.45/fs/namei.c
43834--- linux-2.6.32.45/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
43835+++ linux-2.6.32.45/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
43836@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
43837 return ret;
43838
43839 /*
43840- * Read/write DACs are always overridable.
43841- * Executable DACs are overridable if at least one exec bit is set.
43842- */
43843- if (!(mask & MAY_EXEC) || execute_ok(inode))
43844- if (capable(CAP_DAC_OVERRIDE))
43845- return 0;
43846-
43847- /*
43848 * Searching includes executable on directories, else just read.
43849 */
43850 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
43851@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
43852 if (capable(CAP_DAC_READ_SEARCH))
43853 return 0;
43854
43855+ /*
43856+ * Read/write DACs are always overridable.
43857+ * Executable DACs are overridable if at least one exec bit is set.
43858+ */
43859+ if (!(mask & MAY_EXEC) || execute_ok(inode))
43860+ if (capable(CAP_DAC_OVERRIDE))
43861+ return 0;
43862+
43863 return -EACCES;
43864 }
43865
43866@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
43867 if (!ret)
43868 goto ok;
43869
43870- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
43871+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
43872+ capable(CAP_DAC_OVERRIDE))
43873 goto ok;
43874
43875 return ret;
43876@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
43877 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
43878 error = PTR_ERR(cookie);
43879 if (!IS_ERR(cookie)) {
43880- char *s = nd_get_link(nd);
43881+ const char *s = nd_get_link(nd);
43882 error = 0;
43883 if (s)
43884 error = __vfs_follow_link(nd, s);
43885@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
43886 err = security_inode_follow_link(path->dentry, nd);
43887 if (err)
43888 goto loop;
43889+
43890+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
43891+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
43892+ err = -EACCES;
43893+ goto loop;
43894+ }
43895+
43896 current->link_count++;
43897 current->total_link_count++;
43898 nd->depth++;
43899@@ -1016,11 +1024,18 @@ return_reval:
43900 break;
43901 }
43902 return_base:
43903+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
43904+ path_put(&nd->path);
43905+ return -ENOENT;
43906+ }
43907 return 0;
43908 out_dput:
43909 path_put_conditional(&next, nd);
43910 break;
43911 }
43912+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
43913+ err = -ENOENT;
43914+
43915 path_put(&nd->path);
43916 return_err:
43917 return err;
43918@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
43919 int retval = path_init(dfd, name, flags, nd);
43920 if (!retval)
43921 retval = path_walk(name, nd);
43922- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
43923- nd->path.dentry->d_inode))
43924- audit_inode(name, nd->path.dentry);
43925+
43926+ if (likely(!retval)) {
43927+ if (nd->path.dentry && nd->path.dentry->d_inode) {
43928+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
43929+ retval = -ENOENT;
43930+ if (!audit_dummy_context())
43931+ audit_inode(name, nd->path.dentry);
43932+ }
43933+ }
43934 if (nd->root.mnt) {
43935 path_put(&nd->root);
43936 nd->root.mnt = NULL;
43937 }
43938+
43939 return retval;
43940 }
43941
43942@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
43943 if (error)
43944 goto err_out;
43945
43946+
43947+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
43948+ error = -EPERM;
43949+ goto err_out;
43950+ }
43951+ if (gr_handle_rawio(inode)) {
43952+ error = -EPERM;
43953+ goto err_out;
43954+ }
43955+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
43956+ error = -EACCES;
43957+ goto err_out;
43958+ }
43959+
43960 if (flag & O_TRUNC) {
43961 error = get_write_access(inode);
43962 if (error)
43963@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
43964 int error;
43965 struct dentry *dir = nd->path.dentry;
43966
43967+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
43968+ error = -EACCES;
43969+ goto out_unlock;
43970+ }
43971+
43972 if (!IS_POSIXACL(dir->d_inode))
43973 mode &= ~current_umask();
43974 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
43975 if (error)
43976 goto out_unlock;
43977 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
43978+ if (!error)
43979+ gr_handle_create(path->dentry, nd->path.mnt);
43980 out_unlock:
43981 mutex_unlock(&dir->d_inode->i_mutex);
43982 dput(nd->path.dentry);
43983@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
43984 &nd, flag);
43985 if (error)
43986 return ERR_PTR(error);
43987+
43988+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
43989+ error = -EPERM;
43990+ goto exit;
43991+ }
43992+
43993+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
43994+ error = -EPERM;
43995+ goto exit;
43996+ }
43997+
43998+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
43999+ error = -EACCES;
44000+ goto exit;
44001+ }
44002+
44003 goto ok;
44004 }
44005
44006@@ -1795,6 +1854,14 @@ do_last:
44007 /*
44008 * It already exists.
44009 */
44010+
44011+ /* only check if O_CREAT is specified, all other checks need
44012+ to go into may_open */
44013+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
44014+ error = -EACCES;
44015+ goto exit_mutex_unlock;
44016+ }
44017+
44018 mutex_unlock(&dir->d_inode->i_mutex);
44019 audit_inode(pathname, path.dentry);
44020
44021@@ -1887,6 +1954,13 @@ do_link:
44022 error = security_inode_follow_link(path.dentry, &nd);
44023 if (error)
44024 goto exit_dput;
44025+
44026+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
44027+ path.dentry, nd.path.mnt)) {
44028+ error = -EACCES;
44029+ goto exit_dput;
44030+ }
44031+
44032 error = __do_follow_link(&path, &nd);
44033 if (error) {
44034 /* Does someone understand code flow here? Or it is only
44035@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44036 error = may_mknod(mode);
44037 if (error)
44038 goto out_dput;
44039+
44040+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
44041+ error = -EPERM;
44042+ goto out_dput;
44043+ }
44044+
44045+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
44046+ error = -EACCES;
44047+ goto out_dput;
44048+ }
44049+
44050 error = mnt_want_write(nd.path.mnt);
44051 if (error)
44052 goto out_dput;
44053@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44054 }
44055 out_drop_write:
44056 mnt_drop_write(nd.path.mnt);
44057+
44058+ if (!error)
44059+ gr_handle_create(dentry, nd.path.mnt);
44060 out_dput:
44061 dput(dentry);
44062 out_unlock:
44063@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44064 if (IS_ERR(dentry))
44065 goto out_unlock;
44066
44067+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
44068+ error = -EACCES;
44069+ goto out_dput;
44070+ }
44071+
44072 if (!IS_POSIXACL(nd.path.dentry->d_inode))
44073 mode &= ~current_umask();
44074 error = mnt_want_write(nd.path.mnt);
44075@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44076 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
44077 out_drop_write:
44078 mnt_drop_write(nd.path.mnt);
44079+
44080+ if (!error)
44081+ gr_handle_create(dentry, nd.path.mnt);
44082+
44083 out_dput:
44084 dput(dentry);
44085 out_unlock:
44086@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
44087 char * name;
44088 struct dentry *dentry;
44089 struct nameidata nd;
44090+ ino_t saved_ino = 0;
44091+ dev_t saved_dev = 0;
44092
44093 error = user_path_parent(dfd, pathname, &nd, &name);
44094 if (error)
44095@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
44096 error = PTR_ERR(dentry);
44097 if (IS_ERR(dentry))
44098 goto exit2;
44099+
44100+ if (dentry->d_inode != NULL) {
44101+ if (dentry->d_inode->i_nlink <= 1) {
44102+ saved_ino = dentry->d_inode->i_ino;
44103+ saved_dev = gr_get_dev_from_dentry(dentry);
44104+ }
44105+
44106+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44107+ error = -EACCES;
44108+ goto exit3;
44109+ }
44110+ }
44111+
44112 error = mnt_want_write(nd.path.mnt);
44113 if (error)
44114 goto exit3;
44115@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
44116 if (error)
44117 goto exit4;
44118 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44119+ if (!error && (saved_dev || saved_ino))
44120+ gr_handle_delete(saved_ino, saved_dev);
44121 exit4:
44122 mnt_drop_write(nd.path.mnt);
44123 exit3:
44124@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
44125 struct dentry *dentry;
44126 struct nameidata nd;
44127 struct inode *inode = NULL;
44128+ ino_t saved_ino = 0;
44129+ dev_t saved_dev = 0;
44130
44131 error = user_path_parent(dfd, pathname, &nd, &name);
44132 if (error)
44133@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
44134 if (nd.last.name[nd.last.len])
44135 goto slashes;
44136 inode = dentry->d_inode;
44137- if (inode)
44138+ if (inode) {
44139+ if (inode->i_nlink <= 1) {
44140+ saved_ino = inode->i_ino;
44141+ saved_dev = gr_get_dev_from_dentry(dentry);
44142+ }
44143+
44144 atomic_inc(&inode->i_count);
44145+
44146+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44147+ error = -EACCES;
44148+ goto exit2;
44149+ }
44150+ }
44151 error = mnt_want_write(nd.path.mnt);
44152 if (error)
44153 goto exit2;
44154@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
44155 if (error)
44156 goto exit3;
44157 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44158+ if (!error && (saved_ino || saved_dev))
44159+ gr_handle_delete(saved_ino, saved_dev);
44160 exit3:
44161 mnt_drop_write(nd.path.mnt);
44162 exit2:
44163@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44164 if (IS_ERR(dentry))
44165 goto out_unlock;
44166
44167+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44168+ error = -EACCES;
44169+ goto out_dput;
44170+ }
44171+
44172 error = mnt_want_write(nd.path.mnt);
44173 if (error)
44174 goto out_dput;
44175@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44176 if (error)
44177 goto out_drop_write;
44178 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44179+ if (!error)
44180+ gr_handle_create(dentry, nd.path.mnt);
44181 out_drop_write:
44182 mnt_drop_write(nd.path.mnt);
44183 out_dput:
44184@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44185 error = PTR_ERR(new_dentry);
44186 if (IS_ERR(new_dentry))
44187 goto out_unlock;
44188+
44189+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44190+ old_path.dentry->d_inode,
44191+ old_path.dentry->d_inode->i_mode, to)) {
44192+ error = -EACCES;
44193+ goto out_dput;
44194+ }
44195+
44196+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44197+ old_path.dentry, old_path.mnt, to)) {
44198+ error = -EACCES;
44199+ goto out_dput;
44200+ }
44201+
44202 error = mnt_want_write(nd.path.mnt);
44203 if (error)
44204 goto out_dput;
44205@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44206 if (error)
44207 goto out_drop_write;
44208 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44209+ if (!error)
44210+ gr_handle_create(new_dentry, nd.path.mnt);
44211 out_drop_write:
44212 mnt_drop_write(nd.path.mnt);
44213 out_dput:
44214@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44215 char *to;
44216 int error;
44217
44218+ pax_track_stack();
44219+
44220 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44221 if (error)
44222 goto exit;
44223@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44224 if (new_dentry == trap)
44225 goto exit5;
44226
44227+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44228+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44229+ to);
44230+ if (error)
44231+ goto exit5;
44232+
44233 error = mnt_want_write(oldnd.path.mnt);
44234 if (error)
44235 goto exit5;
44236@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44237 goto exit6;
44238 error = vfs_rename(old_dir->d_inode, old_dentry,
44239 new_dir->d_inode, new_dentry);
44240+ if (!error)
44241+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44242+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44243 exit6:
44244 mnt_drop_write(oldnd.path.mnt);
44245 exit5:
44246@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44247
44248 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44249 {
44250+ char tmpbuf[64];
44251+ const char *newlink;
44252 int len;
44253
44254 len = PTR_ERR(link);
44255@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44256 len = strlen(link);
44257 if (len > (unsigned) buflen)
44258 len = buflen;
44259- if (copy_to_user(buffer, link, len))
44260+
44261+ if (len < sizeof(tmpbuf)) {
44262+ memcpy(tmpbuf, link, len);
44263+ newlink = tmpbuf;
44264+ } else
44265+ newlink = link;
44266+
44267+ if (copy_to_user(buffer, newlink, len))
44268 len = -EFAULT;
44269 out:
44270 return len;
44271diff -urNp linux-2.6.32.45/fs/namespace.c linux-2.6.32.45/fs/namespace.c
44272--- linux-2.6.32.45/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44273+++ linux-2.6.32.45/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44274@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44275 if (!(sb->s_flags & MS_RDONLY))
44276 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44277 up_write(&sb->s_umount);
44278+
44279+ gr_log_remount(mnt->mnt_devname, retval);
44280+
44281 return retval;
44282 }
44283
44284@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44285 security_sb_umount_busy(mnt);
44286 up_write(&namespace_sem);
44287 release_mounts(&umount_list);
44288+
44289+ gr_log_unmount(mnt->mnt_devname, retval);
44290+
44291 return retval;
44292 }
44293
44294@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44295 if (retval)
44296 goto dput_out;
44297
44298+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44299+ retval = -EPERM;
44300+ goto dput_out;
44301+ }
44302+
44303+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44304+ retval = -EPERM;
44305+ goto dput_out;
44306+ }
44307+
44308 if (flags & MS_REMOUNT)
44309 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44310 data_page);
44311@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44312 dev_name, data_page);
44313 dput_out:
44314 path_put(&path);
44315+
44316+ gr_log_mount(dev_name, dir_name, retval);
44317+
44318 return retval;
44319 }
44320
44321@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44322 goto out1;
44323 }
44324
44325+ if (gr_handle_chroot_pivot()) {
44326+ error = -EPERM;
44327+ path_put(&old);
44328+ goto out1;
44329+ }
44330+
44331 read_lock(&current->fs->lock);
44332 root = current->fs->root;
44333 path_get(&current->fs->root);
44334diff -urNp linux-2.6.32.45/fs/ncpfs/dir.c linux-2.6.32.45/fs/ncpfs/dir.c
44335--- linux-2.6.32.45/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44336+++ linux-2.6.32.45/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44337@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44338 int res, val = 0, len;
44339 __u8 __name[NCP_MAXPATHLEN + 1];
44340
44341+ pax_track_stack();
44342+
44343 parent = dget_parent(dentry);
44344 dir = parent->d_inode;
44345
44346@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44347 int error, res, len;
44348 __u8 __name[NCP_MAXPATHLEN + 1];
44349
44350+ pax_track_stack();
44351+
44352 lock_kernel();
44353 error = -EIO;
44354 if (!ncp_conn_valid(server))
44355@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44356 int error, result, len;
44357 int opmode;
44358 __u8 __name[NCP_MAXPATHLEN + 1];
44359-
44360+
44361 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44362 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44363
44364+ pax_track_stack();
44365+
44366 error = -EIO;
44367 lock_kernel();
44368 if (!ncp_conn_valid(server))
44369@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44370 int error, len;
44371 __u8 __name[NCP_MAXPATHLEN + 1];
44372
44373+ pax_track_stack();
44374+
44375 DPRINTK("ncp_mkdir: making %s/%s\n",
44376 dentry->d_parent->d_name.name, dentry->d_name.name);
44377
44378@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44379 if (!ncp_conn_valid(server))
44380 goto out;
44381
44382+ pax_track_stack();
44383+
44384 ncp_age_dentry(server, dentry);
44385 len = sizeof(__name);
44386 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44387@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44388 int old_len, new_len;
44389 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44390
44391+ pax_track_stack();
44392+
44393 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44394 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44395 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44396diff -urNp linux-2.6.32.45/fs/ncpfs/inode.c linux-2.6.32.45/fs/ncpfs/inode.c
44397--- linux-2.6.32.45/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44398+++ linux-2.6.32.45/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44399@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44400 #endif
44401 struct ncp_entry_info finfo;
44402
44403+ pax_track_stack();
44404+
44405 data.wdog_pid = NULL;
44406 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44407 if (!server)
44408diff -urNp linux-2.6.32.45/fs/nfs/inode.c linux-2.6.32.45/fs/nfs/inode.c
44409--- linux-2.6.32.45/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44410+++ linux-2.6.32.45/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44411@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44412 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44413 nfsi->attrtimeo_timestamp = jiffies;
44414
44415- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44416+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44417 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44418 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44419 else
44420@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44421 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44422 }
44423
44424-static atomic_long_t nfs_attr_generation_counter;
44425+static atomic_long_unchecked_t nfs_attr_generation_counter;
44426
44427 static unsigned long nfs_read_attr_generation_counter(void)
44428 {
44429- return atomic_long_read(&nfs_attr_generation_counter);
44430+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44431 }
44432
44433 unsigned long nfs_inc_attr_generation_counter(void)
44434 {
44435- return atomic_long_inc_return(&nfs_attr_generation_counter);
44436+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44437 }
44438
44439 void nfs_fattr_init(struct nfs_fattr *fattr)
44440diff -urNp linux-2.6.32.45/fs/nfsd/lockd.c linux-2.6.32.45/fs/nfsd/lockd.c
44441--- linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44442+++ linux-2.6.32.45/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44443@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44444 fput(filp);
44445 }
44446
44447-static struct nlmsvc_binding nfsd_nlm_ops = {
44448+static const struct nlmsvc_binding nfsd_nlm_ops = {
44449 .fopen = nlm_fopen, /* open file for locking */
44450 .fclose = nlm_fclose, /* close file */
44451 };
44452diff -urNp linux-2.6.32.45/fs/nfsd/nfs4state.c linux-2.6.32.45/fs/nfsd/nfs4state.c
44453--- linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44454+++ linux-2.6.32.45/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44455@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44456 unsigned int cmd;
44457 int err;
44458
44459+ pax_track_stack();
44460+
44461 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44462 (long long) lock->lk_offset,
44463 (long long) lock->lk_length);
44464diff -urNp linux-2.6.32.45/fs/nfsd/nfs4xdr.c linux-2.6.32.45/fs/nfsd/nfs4xdr.c
44465--- linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44466+++ linux-2.6.32.45/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44467@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44468 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44469 u32 minorversion = resp->cstate.minorversion;
44470
44471+ pax_track_stack();
44472+
44473 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44474 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44475 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44476diff -urNp linux-2.6.32.45/fs/nfsd/vfs.c linux-2.6.32.45/fs/nfsd/vfs.c
44477--- linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44478+++ linux-2.6.32.45/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44479@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44480 } else {
44481 oldfs = get_fs();
44482 set_fs(KERNEL_DS);
44483- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44484+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44485 set_fs(oldfs);
44486 }
44487
44488@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44489
44490 /* Write the data. */
44491 oldfs = get_fs(); set_fs(KERNEL_DS);
44492- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44493+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44494 set_fs(oldfs);
44495 if (host_err < 0)
44496 goto out_nfserr;
44497@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44498 */
44499
44500 oldfs = get_fs(); set_fs(KERNEL_DS);
44501- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44502+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44503 set_fs(oldfs);
44504
44505 if (host_err < 0)
44506diff -urNp linux-2.6.32.45/fs/nilfs2/ioctl.c linux-2.6.32.45/fs/nilfs2/ioctl.c
44507--- linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44508+++ linux-2.6.32.45/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44509@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44510 unsigned int cmd, void __user *argp)
44511 {
44512 struct nilfs_argv argv[5];
44513- const static size_t argsz[5] = {
44514+ static const size_t argsz[5] = {
44515 sizeof(struct nilfs_vdesc),
44516 sizeof(struct nilfs_period),
44517 sizeof(__u64),
44518diff -urNp linux-2.6.32.45/fs/notify/dnotify/dnotify.c linux-2.6.32.45/fs/notify/dnotify/dnotify.c
44519--- linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44520+++ linux-2.6.32.45/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44521@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44522 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44523 }
44524
44525-static struct fsnotify_ops dnotify_fsnotify_ops = {
44526+static const struct fsnotify_ops dnotify_fsnotify_ops = {
44527 .handle_event = dnotify_handle_event,
44528 .should_send_event = dnotify_should_send_event,
44529 .free_group_priv = NULL,
44530diff -urNp linux-2.6.32.45/fs/notify/notification.c linux-2.6.32.45/fs/notify/notification.c
44531--- linux-2.6.32.45/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44532+++ linux-2.6.32.45/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44533@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44534 * get set to 0 so it will never get 'freed'
44535 */
44536 static struct fsnotify_event q_overflow_event;
44537-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44538+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44539
44540 /**
44541 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44542@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44543 */
44544 u32 fsnotify_get_cookie(void)
44545 {
44546- return atomic_inc_return(&fsnotify_sync_cookie);
44547+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44548 }
44549 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44550
44551diff -urNp linux-2.6.32.45/fs/ntfs/dir.c linux-2.6.32.45/fs/ntfs/dir.c
44552--- linux-2.6.32.45/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44553+++ linux-2.6.32.45/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44554@@ -1328,7 +1328,7 @@ find_next_index_buffer:
44555 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44556 ~(s64)(ndir->itype.index.block_size - 1)));
44557 /* Bounds checks. */
44558- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44559+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44560 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44561 "inode 0x%lx or driver bug.", vdir->i_ino);
44562 goto err_out;
44563diff -urNp linux-2.6.32.45/fs/ntfs/file.c linux-2.6.32.45/fs/ntfs/file.c
44564--- linux-2.6.32.45/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44565+++ linux-2.6.32.45/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44566@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44567 #endif /* NTFS_RW */
44568 };
44569
44570-const struct file_operations ntfs_empty_file_ops = {};
44571+const struct file_operations ntfs_empty_file_ops __read_only;
44572
44573-const struct inode_operations ntfs_empty_inode_ops = {};
44574+const struct inode_operations ntfs_empty_inode_ops __read_only;
44575diff -urNp linux-2.6.32.45/fs/ocfs2/cluster/masklog.c linux-2.6.32.45/fs/ocfs2/cluster/masklog.c
44576--- linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44577+++ linux-2.6.32.45/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44578@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44579 return mlog_mask_store(mlog_attr->mask, buf, count);
44580 }
44581
44582-static struct sysfs_ops mlog_attr_ops = {
44583+static const struct sysfs_ops mlog_attr_ops = {
44584 .show = mlog_show,
44585 .store = mlog_store,
44586 };
44587diff -urNp linux-2.6.32.45/fs/ocfs2/localalloc.c linux-2.6.32.45/fs/ocfs2/localalloc.c
44588--- linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44589+++ linux-2.6.32.45/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44590@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44591 goto bail;
44592 }
44593
44594- atomic_inc(&osb->alloc_stats.moves);
44595+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44596
44597 status = 0;
44598 bail:
44599diff -urNp linux-2.6.32.45/fs/ocfs2/namei.c linux-2.6.32.45/fs/ocfs2/namei.c
44600--- linux-2.6.32.45/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44601+++ linux-2.6.32.45/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44602@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44603 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44604 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44605
44606+ pax_track_stack();
44607+
44608 /* At some point it might be nice to break this function up a
44609 * bit. */
44610
44611diff -urNp linux-2.6.32.45/fs/ocfs2/ocfs2.h linux-2.6.32.45/fs/ocfs2/ocfs2.h
44612--- linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44613+++ linux-2.6.32.45/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44614@@ -217,11 +217,11 @@ enum ocfs2_vol_state
44615
44616 struct ocfs2_alloc_stats
44617 {
44618- atomic_t moves;
44619- atomic_t local_data;
44620- atomic_t bitmap_data;
44621- atomic_t bg_allocs;
44622- atomic_t bg_extends;
44623+ atomic_unchecked_t moves;
44624+ atomic_unchecked_t local_data;
44625+ atomic_unchecked_t bitmap_data;
44626+ atomic_unchecked_t bg_allocs;
44627+ atomic_unchecked_t bg_extends;
44628 };
44629
44630 enum ocfs2_local_alloc_state
44631diff -urNp linux-2.6.32.45/fs/ocfs2/suballoc.c linux-2.6.32.45/fs/ocfs2/suballoc.c
44632--- linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44633+++ linux-2.6.32.45/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44634@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44635 mlog_errno(status);
44636 goto bail;
44637 }
44638- atomic_inc(&osb->alloc_stats.bg_extends);
44639+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44640
44641 /* You should never ask for this much metadata */
44642 BUG_ON(bits_wanted >
44643@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44644 mlog_errno(status);
44645 goto bail;
44646 }
44647- atomic_inc(&osb->alloc_stats.bg_allocs);
44648+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44649
44650 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44651 ac->ac_bits_given += (*num_bits);
44652@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44653 mlog_errno(status);
44654 goto bail;
44655 }
44656- atomic_inc(&osb->alloc_stats.bg_allocs);
44657+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44658
44659 BUG_ON(num_bits != 1);
44660
44661@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44662 cluster_start,
44663 num_clusters);
44664 if (!status)
44665- atomic_inc(&osb->alloc_stats.local_data);
44666+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44667 } else {
44668 if (min_clusters > (osb->bitmap_cpg - 1)) {
44669 /* The only paths asking for contiguousness
44670@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44671 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44672 bg_blkno,
44673 bg_bit_off);
44674- atomic_inc(&osb->alloc_stats.bitmap_data);
44675+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44676 }
44677 }
44678 if (status < 0) {
44679diff -urNp linux-2.6.32.45/fs/ocfs2/super.c linux-2.6.32.45/fs/ocfs2/super.c
44680--- linux-2.6.32.45/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44681+++ linux-2.6.32.45/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44682@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44683 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44684 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44685 "Stats",
44686- atomic_read(&osb->alloc_stats.bitmap_data),
44687- atomic_read(&osb->alloc_stats.local_data),
44688- atomic_read(&osb->alloc_stats.bg_allocs),
44689- atomic_read(&osb->alloc_stats.moves),
44690- atomic_read(&osb->alloc_stats.bg_extends));
44691+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44692+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44693+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44694+ atomic_read_unchecked(&osb->alloc_stats.moves),
44695+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44696
44697 out += snprintf(buf + out, len - out,
44698 "%10s => State: %u Descriptor: %llu Size: %u bits "
44699@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44700 spin_lock_init(&osb->osb_xattr_lock);
44701 ocfs2_init_inode_steal_slot(osb);
44702
44703- atomic_set(&osb->alloc_stats.moves, 0);
44704- atomic_set(&osb->alloc_stats.local_data, 0);
44705- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44706- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44707- atomic_set(&osb->alloc_stats.bg_extends, 0);
44708+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44709+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44710+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44711+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44712+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44713
44714 /* Copy the blockcheck stats from the superblock probe */
44715 osb->osb_ecc_stats = *stats;
44716diff -urNp linux-2.6.32.45/fs/open.c linux-2.6.32.45/fs/open.c
44717--- linux-2.6.32.45/fs/open.c 2011-03-27 14:31:47.000000000 -0400
44718+++ linux-2.6.32.45/fs/open.c 2011-04-17 15:56:46.000000000 -0400
44719@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
44720 error = locks_verify_truncate(inode, NULL, length);
44721 if (!error)
44722 error = security_path_truncate(&path, length, 0);
44723+
44724+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
44725+ error = -EACCES;
44726+
44727 if (!error) {
44728 vfs_dq_init(inode);
44729 error = do_truncate(path.dentry, length, 0, NULL);
44730@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
44731 if (__mnt_is_readonly(path.mnt))
44732 res = -EROFS;
44733
44734+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
44735+ res = -EACCES;
44736+
44737 out_path_release:
44738 path_put(&path);
44739 out:
44740@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
44741 if (error)
44742 goto dput_and_out;
44743
44744+ gr_log_chdir(path.dentry, path.mnt);
44745+
44746 set_fs_pwd(current->fs, &path);
44747
44748 dput_and_out:
44749@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
44750 goto out_putf;
44751
44752 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
44753+
44754+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
44755+ error = -EPERM;
44756+
44757+ if (!error)
44758+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
44759+
44760 if (!error)
44761 set_fs_pwd(current->fs, &file->f_path);
44762 out_putf:
44763@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
44764 if (!capable(CAP_SYS_CHROOT))
44765 goto dput_and_out;
44766
44767+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
44768+ goto dput_and_out;
44769+
44770+ if (gr_handle_chroot_caps(&path)) {
44771+ error = -ENOMEM;
44772+ goto dput_and_out;
44773+ }
44774+
44775 set_fs_root(current->fs, &path);
44776+
44777+ gr_handle_chroot_chdir(&path);
44778+
44779 error = 0;
44780 dput_and_out:
44781 path_put(&path);
44782@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
44783 err = mnt_want_write_file(file);
44784 if (err)
44785 goto out_putf;
44786+
44787 mutex_lock(&inode->i_mutex);
44788+
44789+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
44790+ err = -EACCES;
44791+ goto out_unlock;
44792+ }
44793+
44794 if (mode == (mode_t) -1)
44795 mode = inode->i_mode;
44796+
44797+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
44798+ err = -EPERM;
44799+ goto out_unlock;
44800+ }
44801+
44802 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44803 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44804 err = notify_change(dentry, &newattrs);
44805+
44806+out_unlock:
44807 mutex_unlock(&inode->i_mutex);
44808 mnt_drop_write(file->f_path.mnt);
44809 out_putf:
44810@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
44811 error = mnt_want_write(path.mnt);
44812 if (error)
44813 goto dput_and_out;
44814+
44815 mutex_lock(&inode->i_mutex);
44816+
44817+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
44818+ error = -EACCES;
44819+ goto out_unlock;
44820+ }
44821+
44822 if (mode == (mode_t) -1)
44823 mode = inode->i_mode;
44824+
44825+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
44826+ error = -EACCES;
44827+ goto out_unlock;
44828+ }
44829+
44830 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
44831 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
44832 error = notify_change(path.dentry, &newattrs);
44833+
44834+out_unlock:
44835 mutex_unlock(&inode->i_mutex);
44836 mnt_drop_write(path.mnt);
44837 dput_and_out:
44838@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
44839 return sys_fchmodat(AT_FDCWD, filename, mode);
44840 }
44841
44842-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
44843+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
44844 {
44845 struct inode *inode = dentry->d_inode;
44846 int error;
44847 struct iattr newattrs;
44848
44849+ if (!gr_acl_handle_chown(dentry, mnt))
44850+ return -EACCES;
44851+
44852 newattrs.ia_valid = ATTR_CTIME;
44853 if (user != (uid_t) -1) {
44854 newattrs.ia_valid |= ATTR_UID;
44855@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
44856 error = mnt_want_write(path.mnt);
44857 if (error)
44858 goto out_release;
44859- error = chown_common(path.dentry, user, group);
44860+ error = chown_common(path.dentry, user, group, path.mnt);
44861 mnt_drop_write(path.mnt);
44862 out_release:
44863 path_put(&path);
44864@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
44865 error = mnt_want_write(path.mnt);
44866 if (error)
44867 goto out_release;
44868- error = chown_common(path.dentry, user, group);
44869+ error = chown_common(path.dentry, user, group, path.mnt);
44870 mnt_drop_write(path.mnt);
44871 out_release:
44872 path_put(&path);
44873@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
44874 error = mnt_want_write(path.mnt);
44875 if (error)
44876 goto out_release;
44877- error = chown_common(path.dentry, user, group);
44878+ error = chown_common(path.dentry, user, group, path.mnt);
44879 mnt_drop_write(path.mnt);
44880 out_release:
44881 path_put(&path);
44882@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
44883 goto out_fput;
44884 dentry = file->f_path.dentry;
44885 audit_inode(NULL, dentry);
44886- error = chown_common(dentry, user, group);
44887+ error = chown_common(dentry, user, group, file->f_path.mnt);
44888 mnt_drop_write(file->f_path.mnt);
44889 out_fput:
44890 fput(file);
44891@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
44892 if (!IS_ERR(tmp)) {
44893 fd = get_unused_fd_flags(flags);
44894 if (fd >= 0) {
44895- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
44896+ struct file *f;
44897+ /* don't allow to be set by userland */
44898+ flags &= ~FMODE_GREXEC;
44899+ f = do_filp_open(dfd, tmp, flags, mode, 0);
44900 if (IS_ERR(f)) {
44901 put_unused_fd(fd);
44902 fd = PTR_ERR(f);
44903diff -urNp linux-2.6.32.45/fs/partitions/ldm.c linux-2.6.32.45/fs/partitions/ldm.c
44904--- linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
44905+++ linux-2.6.32.45/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
44906@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
44907 ldm_error ("A VBLK claims to have %d parts.", num);
44908 return false;
44909 }
44910+
44911 if (rec >= num) {
44912 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
44913 return false;
44914@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
44915 goto found;
44916 }
44917
44918- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
44919+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
44920 if (!f) {
44921 ldm_crit ("Out of memory.");
44922 return false;
44923diff -urNp linux-2.6.32.45/fs/partitions/mac.c linux-2.6.32.45/fs/partitions/mac.c
44924--- linux-2.6.32.45/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
44925+++ linux-2.6.32.45/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
44926@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
44927 return 0; /* not a MacOS disk */
44928 }
44929 blocks_in_map = be32_to_cpu(part->map_count);
44930+ printk(" [mac]");
44931 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
44932 put_dev_sector(sect);
44933 return 0;
44934 }
44935- printk(" [mac]");
44936 for (slot = 1; slot <= blocks_in_map; ++slot) {
44937 int pos = slot * secsize;
44938 put_dev_sector(sect);
44939diff -urNp linux-2.6.32.45/fs/pipe.c linux-2.6.32.45/fs/pipe.c
44940--- linux-2.6.32.45/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
44941+++ linux-2.6.32.45/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
44942@@ -401,9 +401,9 @@ redo:
44943 }
44944 if (bufs) /* More to do? */
44945 continue;
44946- if (!pipe->writers)
44947+ if (!atomic_read(&pipe->writers))
44948 break;
44949- if (!pipe->waiting_writers) {
44950+ if (!atomic_read(&pipe->waiting_writers)) {
44951 /* syscall merging: Usually we must not sleep
44952 * if O_NONBLOCK is set, or if we got some data.
44953 * But if a writer sleeps in kernel space, then
44954@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
44955 mutex_lock(&inode->i_mutex);
44956 pipe = inode->i_pipe;
44957
44958- if (!pipe->readers) {
44959+ if (!atomic_read(&pipe->readers)) {
44960 send_sig(SIGPIPE, current, 0);
44961 ret = -EPIPE;
44962 goto out;
44963@@ -511,7 +511,7 @@ redo1:
44964 for (;;) {
44965 int bufs;
44966
44967- if (!pipe->readers) {
44968+ if (!atomic_read(&pipe->readers)) {
44969 send_sig(SIGPIPE, current, 0);
44970 if (!ret)
44971 ret = -EPIPE;
44972@@ -597,9 +597,9 @@ redo2:
44973 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44974 do_wakeup = 0;
44975 }
44976- pipe->waiting_writers++;
44977+ atomic_inc(&pipe->waiting_writers);
44978 pipe_wait(pipe);
44979- pipe->waiting_writers--;
44980+ atomic_dec(&pipe->waiting_writers);
44981 }
44982 out:
44983 mutex_unlock(&inode->i_mutex);
44984@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
44985 mask = 0;
44986 if (filp->f_mode & FMODE_READ) {
44987 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
44988- if (!pipe->writers && filp->f_version != pipe->w_counter)
44989+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
44990 mask |= POLLHUP;
44991 }
44992
44993@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
44994 * Most Unices do not set POLLERR for FIFOs but on Linux they
44995 * behave exactly like pipes for poll().
44996 */
44997- if (!pipe->readers)
44998+ if (!atomic_read(&pipe->readers))
44999 mask |= POLLERR;
45000 }
45001
45002@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
45003
45004 mutex_lock(&inode->i_mutex);
45005 pipe = inode->i_pipe;
45006- pipe->readers -= decr;
45007- pipe->writers -= decw;
45008+ atomic_sub(decr, &pipe->readers);
45009+ atomic_sub(decw, &pipe->writers);
45010
45011- if (!pipe->readers && !pipe->writers) {
45012+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45013 free_pipe_info(inode);
45014 } else {
45015 wake_up_interruptible_sync(&pipe->wait);
45016@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
45017
45018 if (inode->i_pipe) {
45019 ret = 0;
45020- inode->i_pipe->readers++;
45021+ atomic_inc(&inode->i_pipe->readers);
45022 }
45023
45024 mutex_unlock(&inode->i_mutex);
45025@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
45026
45027 if (inode->i_pipe) {
45028 ret = 0;
45029- inode->i_pipe->writers++;
45030+ atomic_inc(&inode->i_pipe->writers);
45031 }
45032
45033 mutex_unlock(&inode->i_mutex);
45034@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
45035 if (inode->i_pipe) {
45036 ret = 0;
45037 if (filp->f_mode & FMODE_READ)
45038- inode->i_pipe->readers++;
45039+ atomic_inc(&inode->i_pipe->readers);
45040 if (filp->f_mode & FMODE_WRITE)
45041- inode->i_pipe->writers++;
45042+ atomic_inc(&inode->i_pipe->writers);
45043 }
45044
45045 mutex_unlock(&inode->i_mutex);
45046@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
45047 inode->i_pipe = NULL;
45048 }
45049
45050-static struct vfsmount *pipe_mnt __read_mostly;
45051+struct vfsmount *pipe_mnt __read_mostly;
45052 static int pipefs_delete_dentry(struct dentry *dentry)
45053 {
45054 /*
45055@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
45056 goto fail_iput;
45057 inode->i_pipe = pipe;
45058
45059- pipe->readers = pipe->writers = 1;
45060+ atomic_set(&pipe->readers, 1);
45061+ atomic_set(&pipe->writers, 1);
45062 inode->i_fop = &rdwr_pipefifo_fops;
45063
45064 /*
45065diff -urNp linux-2.6.32.45/fs/proc/array.c linux-2.6.32.45/fs/proc/array.c
45066--- linux-2.6.32.45/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
45067+++ linux-2.6.32.45/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
45068@@ -60,6 +60,7 @@
45069 #include <linux/tty.h>
45070 #include <linux/string.h>
45071 #include <linux/mman.h>
45072+#include <linux/grsecurity.h>
45073 #include <linux/proc_fs.h>
45074 #include <linux/ioport.h>
45075 #include <linux/uaccess.h>
45076@@ -321,6 +322,21 @@ static inline void task_context_switch_c
45077 p->nivcsw);
45078 }
45079
45080+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45081+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45082+{
45083+ if (p->mm)
45084+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45085+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45086+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45087+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45088+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45089+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45090+ else
45091+ seq_printf(m, "PaX:\t-----\n");
45092+}
45093+#endif
45094+
45095 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45096 struct pid *pid, struct task_struct *task)
45097 {
45098@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
45099 task_cap(m, task);
45100 cpuset_task_status_allowed(m, task);
45101 task_context_switch_counts(m, task);
45102+
45103+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45104+ task_pax(m, task);
45105+#endif
45106+
45107+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45108+ task_grsec_rbac(m, task);
45109+#endif
45110+
45111 return 0;
45112 }
45113
45114+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45115+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45116+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45117+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45118+#endif
45119+
45120 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45121 struct pid *pid, struct task_struct *task, int whole)
45122 {
45123@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
45124 cputime_t cutime, cstime, utime, stime;
45125 cputime_t cgtime, gtime;
45126 unsigned long rsslim = 0;
45127- char tcomm[sizeof(task->comm)];
45128+ char tcomm[sizeof(task->comm)] = { 0 };
45129 unsigned long flags;
45130
45131+ pax_track_stack();
45132+
45133 state = *get_task_state(task);
45134 vsize = eip = esp = 0;
45135 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45136@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
45137 gtime = task_gtime(task);
45138 }
45139
45140+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45141+ if (PAX_RAND_FLAGS(mm)) {
45142+ eip = 0;
45143+ esp = 0;
45144+ wchan = 0;
45145+ }
45146+#endif
45147+#ifdef CONFIG_GRKERNSEC_HIDESYM
45148+ wchan = 0;
45149+ eip =0;
45150+ esp =0;
45151+#endif
45152+
45153 /* scale priority and nice values from timeslices to -20..20 */
45154 /* to make it look like a "normal" Unix priority/nice value */
45155 priority = task_prio(task);
45156@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
45157 vsize,
45158 mm ? get_mm_rss(mm) : 0,
45159 rsslim,
45160+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45161+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45162+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45163+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45164+#else
45165 mm ? (permitted ? mm->start_code : 1) : 0,
45166 mm ? (permitted ? mm->end_code : 1) : 0,
45167 (permitted && mm) ? mm->start_stack : 0,
45168+#endif
45169 esp,
45170 eip,
45171 /* The signal information here is obsolete.
45172@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
45173
45174 return 0;
45175 }
45176+
45177+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45178+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45179+{
45180+ u32 curr_ip = 0;
45181+ unsigned long flags;
45182+
45183+ if (lock_task_sighand(task, &flags)) {
45184+ curr_ip = task->signal->curr_ip;
45185+ unlock_task_sighand(task, &flags);
45186+ }
45187+
45188+ return sprintf(buffer, "%pI4\n", &curr_ip);
45189+}
45190+#endif
45191diff -urNp linux-2.6.32.45/fs/proc/base.c linux-2.6.32.45/fs/proc/base.c
45192--- linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:35:30.000000000 -0400
45193+++ linux-2.6.32.45/fs/proc/base.c 2011-08-09 18:34:33.000000000 -0400
45194@@ -102,6 +102,22 @@ struct pid_entry {
45195 union proc_op op;
45196 };
45197
45198+struct getdents_callback {
45199+ struct linux_dirent __user * current_dir;
45200+ struct linux_dirent __user * previous;
45201+ struct file * file;
45202+ int count;
45203+ int error;
45204+};
45205+
45206+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45207+ loff_t offset, u64 ino, unsigned int d_type)
45208+{
45209+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45210+ buf->error = -EINVAL;
45211+ return 0;
45212+}
45213+
45214 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45215 .name = (NAME), \
45216 .len = sizeof(NAME) - 1, \
45217@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45218 if (task == current)
45219 return 0;
45220
45221+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45222+ return -EPERM;
45223+
45224 /*
45225 * If current is actively ptrace'ing, and would also be
45226 * permitted to freshly attach with ptrace now, permit it.
45227@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45228 if (!mm->arg_end)
45229 goto out_mm; /* Shh! No looking before we're done */
45230
45231+ if (gr_acl_handle_procpidmem(task))
45232+ goto out_mm;
45233+
45234 len = mm->arg_end - mm->arg_start;
45235
45236 if (len > PAGE_SIZE)
45237@@ -287,12 +309,28 @@ out:
45238 return res;
45239 }
45240
45241+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45242+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45243+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45244+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45245+#endif
45246+
45247 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45248 {
45249 int res = 0;
45250 struct mm_struct *mm = get_task_mm(task);
45251 if (mm) {
45252 unsigned int nwords = 0;
45253+
45254+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45255+ /* allow if we're currently ptracing this task */
45256+ if (PAX_RAND_FLAGS(mm) &&
45257+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45258+ mmput(mm);
45259+ return res;
45260+ }
45261+#endif
45262+
45263 do {
45264 nwords += 2;
45265 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45266@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45267 }
45268
45269
45270-#ifdef CONFIG_KALLSYMS
45271+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45272 /*
45273 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45274 * Returns the resolved symbol. If that fails, simply return the address.
45275@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45276 }
45277 #endif /* CONFIG_KALLSYMS */
45278
45279-#ifdef CONFIG_STACKTRACE
45280+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45281
45282 #define MAX_STACK_TRACE_DEPTH 64
45283
45284@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45285 return count;
45286 }
45287
45288-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45289+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45290 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45291 {
45292 long nr;
45293@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45294 /************************************************************************/
45295
45296 /* permission checks */
45297-static int proc_fd_access_allowed(struct inode *inode)
45298+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45299 {
45300 struct task_struct *task;
45301 int allowed = 0;
45302@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45303 */
45304 task = get_proc_task(inode);
45305 if (task) {
45306- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45307+ if (log)
45308+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45309+ else
45310+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45311 put_task_struct(task);
45312 }
45313 return allowed;
45314@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45315 if (!task)
45316 goto out_no_task;
45317
45318+ if (gr_acl_handle_procpidmem(task))
45319+ goto out;
45320+
45321 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45322 goto out;
45323
45324@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45325 path_put(&nd->path);
45326
45327 /* Are we allowed to snoop on the tasks file descriptors? */
45328- if (!proc_fd_access_allowed(inode))
45329+ if (!proc_fd_access_allowed(inode,0))
45330 goto out;
45331
45332 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45333@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45334 struct path path;
45335
45336 /* Are we allowed to snoop on the tasks file descriptors? */
45337- if (!proc_fd_access_allowed(inode))
45338- goto out;
45339+ /* logging this is needed for learning on chromium to work properly,
45340+ but we don't want to flood the logs from 'ps' which does a readlink
45341+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45342+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45343+ */
45344+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45345+ if (!proc_fd_access_allowed(inode,0))
45346+ goto out;
45347+ } else {
45348+ if (!proc_fd_access_allowed(inode,1))
45349+ goto out;
45350+ }
45351
45352 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45353 if (error)
45354@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45355 rcu_read_lock();
45356 cred = __task_cred(task);
45357 inode->i_uid = cred->euid;
45358+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45359+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45360+#else
45361 inode->i_gid = cred->egid;
45362+#endif
45363 rcu_read_unlock();
45364 }
45365 security_task_to_inode(task, inode);
45366@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45367 struct inode *inode = dentry->d_inode;
45368 struct task_struct *task;
45369 const struct cred *cred;
45370+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45371+ const struct cred *tmpcred = current_cred();
45372+#endif
45373
45374 generic_fillattr(inode, stat);
45375
45376@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45377 stat->uid = 0;
45378 stat->gid = 0;
45379 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45380+
45381+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45382+ rcu_read_unlock();
45383+ return -ENOENT;
45384+ }
45385+
45386 if (task) {
45387+ cred = __task_cred(task);
45388+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45389+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45390+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45391+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45392+#endif
45393+ ) {
45394+#endif
45395 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45396+#ifdef CONFIG_GRKERNSEC_PROC_USER
45397+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45398+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45399+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45400+#endif
45401 task_dumpable(task)) {
45402- cred = __task_cred(task);
45403 stat->uid = cred->euid;
45404+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45405+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45406+#else
45407 stat->gid = cred->egid;
45408+#endif
45409 }
45410+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45411+ } else {
45412+ rcu_read_unlock();
45413+ return -ENOENT;
45414+ }
45415+#endif
45416 }
45417 rcu_read_unlock();
45418 return 0;
45419@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45420
45421 if (task) {
45422 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45423+#ifdef CONFIG_GRKERNSEC_PROC_USER
45424+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45425+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45426+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45427+#endif
45428 task_dumpable(task)) {
45429 rcu_read_lock();
45430 cred = __task_cred(task);
45431 inode->i_uid = cred->euid;
45432+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45433+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45434+#else
45435 inode->i_gid = cred->egid;
45436+#endif
45437 rcu_read_unlock();
45438 } else {
45439 inode->i_uid = 0;
45440@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45441 int fd = proc_fd(inode);
45442
45443 if (task) {
45444- files = get_files_struct(task);
45445+ if (!gr_acl_handle_procpidmem(task))
45446+ files = get_files_struct(task);
45447 put_task_struct(task);
45448 }
45449 if (files) {
45450@@ -1895,12 +1994,22 @@ static const struct file_operations proc
45451 static int proc_fd_permission(struct inode *inode, int mask)
45452 {
45453 int rv;
45454+ struct task_struct *task;
45455
45456 rv = generic_permission(inode, mask, NULL);
45457- if (rv == 0)
45458- return 0;
45459+
45460 if (task_pid(current) == proc_pid(inode))
45461 rv = 0;
45462+
45463+ task = get_proc_task(inode);
45464+ if (task == NULL)
45465+ return rv;
45466+
45467+ if (gr_acl_handle_procpidmem(task))
45468+ rv = -EACCES;
45469+
45470+ put_task_struct(task);
45471+
45472 return rv;
45473 }
45474
45475@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45476 if (!task)
45477 goto out_no_task;
45478
45479+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45480+ goto out;
45481+
45482 /*
45483 * Yes, it does not scale. And it should not. Don't add
45484 * new entries into /proc/<tgid>/ without very good reasons.
45485@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45486 if (!task)
45487 goto out_no_task;
45488
45489+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45490+ goto out;
45491+
45492 ret = 0;
45493 i = filp->f_pos;
45494 switch (i) {
45495@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45496 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45497 void *cookie)
45498 {
45499- char *s = nd_get_link(nd);
45500+ const char *s = nd_get_link(nd);
45501 if (!IS_ERR(s))
45502 __putname(s);
45503 }
45504@@ -2522,7 +2637,7 @@ static const struct pid_entry tgid_base_
45505 #ifdef CONFIG_SCHED_DEBUG
45506 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45507 #endif
45508-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45509+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45510 INF("syscall", S_IRUSR, proc_pid_syscall),
45511 #endif
45512 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45513@@ -2547,10 +2662,10 @@ static const struct pid_entry tgid_base_
45514 #ifdef CONFIG_SECURITY
45515 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45516 #endif
45517-#ifdef CONFIG_KALLSYMS
45518+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45519 INF("wchan", S_IRUGO, proc_pid_wchan),
45520 #endif
45521-#ifdef CONFIG_STACKTRACE
45522+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45523 ONE("stack", S_IRUSR, proc_pid_stack),
45524 #endif
45525 #ifdef CONFIG_SCHEDSTATS
45526@@ -2580,6 +2695,9 @@ static const struct pid_entry tgid_base_
45527 #ifdef CONFIG_TASK_IO_ACCOUNTING
45528 INF("io", S_IRUSR, proc_tgid_io_accounting),
45529 #endif
45530+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45531+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45532+#endif
45533 };
45534
45535 static int proc_tgid_base_readdir(struct file * filp,
45536@@ -2704,7 +2822,14 @@ static struct dentry *proc_pid_instantia
45537 if (!inode)
45538 goto out;
45539
45540+#ifdef CONFIG_GRKERNSEC_PROC_USER
45541+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45542+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45543+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45544+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45545+#else
45546 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45547+#endif
45548 inode->i_op = &proc_tgid_base_inode_operations;
45549 inode->i_fop = &proc_tgid_base_operations;
45550 inode->i_flags|=S_IMMUTABLE;
45551@@ -2746,7 +2871,11 @@ struct dentry *proc_pid_lookup(struct in
45552 if (!task)
45553 goto out;
45554
45555+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45556+ goto out_put_task;
45557+
45558 result = proc_pid_instantiate(dir, dentry, task, NULL);
45559+out_put_task:
45560 put_task_struct(task);
45561 out:
45562 return result;
45563@@ -2811,6 +2940,11 @@ int proc_pid_readdir(struct file * filp,
45564 {
45565 unsigned int nr;
45566 struct task_struct *reaper;
45567+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45568+ const struct cred *tmpcred = current_cred();
45569+ const struct cred *itercred;
45570+#endif
45571+ filldir_t __filldir = filldir;
45572 struct tgid_iter iter;
45573 struct pid_namespace *ns;
45574
45575@@ -2834,8 +2968,27 @@ int proc_pid_readdir(struct file * filp,
45576 for (iter = next_tgid(ns, iter);
45577 iter.task;
45578 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45579+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45580+ rcu_read_lock();
45581+ itercred = __task_cred(iter.task);
45582+#endif
45583+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45584+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45585+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45586+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45587+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45588+#endif
45589+ )
45590+#endif
45591+ )
45592+ __filldir = &gr_fake_filldir;
45593+ else
45594+ __filldir = filldir;
45595+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45596+ rcu_read_unlock();
45597+#endif
45598 filp->f_pos = iter.tgid + TGID_OFFSET;
45599- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45600+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45601 put_task_struct(iter.task);
45602 goto out;
45603 }
45604@@ -2861,7 +3014,7 @@ static const struct pid_entry tid_base_s
45605 #ifdef CONFIG_SCHED_DEBUG
45606 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45607 #endif
45608-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45609+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45610 INF("syscall", S_IRUSR, proc_pid_syscall),
45611 #endif
45612 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45613@@ -2885,10 +3038,10 @@ static const struct pid_entry tid_base_s
45614 #ifdef CONFIG_SECURITY
45615 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45616 #endif
45617-#ifdef CONFIG_KALLSYMS
45618+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45619 INF("wchan", S_IRUGO, proc_pid_wchan),
45620 #endif
45621-#ifdef CONFIG_STACKTRACE
45622+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45623 ONE("stack", S_IRUSR, proc_pid_stack),
45624 #endif
45625 #ifdef CONFIG_SCHEDSTATS
45626diff -urNp linux-2.6.32.45/fs/proc/cmdline.c linux-2.6.32.45/fs/proc/cmdline.c
45627--- linux-2.6.32.45/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45628+++ linux-2.6.32.45/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45629@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45630
45631 static int __init proc_cmdline_init(void)
45632 {
45633+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45634+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45635+#else
45636 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45637+#endif
45638 return 0;
45639 }
45640 module_init(proc_cmdline_init);
45641diff -urNp linux-2.6.32.45/fs/proc/devices.c linux-2.6.32.45/fs/proc/devices.c
45642--- linux-2.6.32.45/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45643+++ linux-2.6.32.45/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45644@@ -64,7 +64,11 @@ static const struct file_operations proc
45645
45646 static int __init proc_devices_init(void)
45647 {
45648+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45649+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45650+#else
45651 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45652+#endif
45653 return 0;
45654 }
45655 module_init(proc_devices_init);
45656diff -urNp linux-2.6.32.45/fs/proc/inode.c linux-2.6.32.45/fs/proc/inode.c
45657--- linux-2.6.32.45/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45658+++ linux-2.6.32.45/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45659@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45660 if (de->mode) {
45661 inode->i_mode = de->mode;
45662 inode->i_uid = de->uid;
45663+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45664+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45665+#else
45666 inode->i_gid = de->gid;
45667+#endif
45668 }
45669 if (de->size)
45670 inode->i_size = de->size;
45671diff -urNp linux-2.6.32.45/fs/proc/internal.h linux-2.6.32.45/fs/proc/internal.h
45672--- linux-2.6.32.45/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45673+++ linux-2.6.32.45/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45674@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45675 struct pid *pid, struct task_struct *task);
45676 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45677 struct pid *pid, struct task_struct *task);
45678+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45679+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45680+#endif
45681 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45682
45683 extern const struct file_operations proc_maps_operations;
45684diff -urNp linux-2.6.32.45/fs/proc/Kconfig linux-2.6.32.45/fs/proc/Kconfig
45685--- linux-2.6.32.45/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45686+++ linux-2.6.32.45/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45687@@ -30,12 +30,12 @@ config PROC_FS
45688
45689 config PROC_KCORE
45690 bool "/proc/kcore support" if !ARM
45691- depends on PROC_FS && MMU
45692+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45693
45694 config PROC_VMCORE
45695 bool "/proc/vmcore support (EXPERIMENTAL)"
45696- depends on PROC_FS && CRASH_DUMP
45697- default y
45698+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45699+ default n
45700 help
45701 Exports the dump image of crashed kernel in ELF format.
45702
45703@@ -59,8 +59,8 @@ config PROC_SYSCTL
45704 limited in memory.
45705
45706 config PROC_PAGE_MONITOR
45707- default y
45708- depends on PROC_FS && MMU
45709+ default n
45710+ depends on PROC_FS && MMU && !GRKERNSEC
45711 bool "Enable /proc page monitoring" if EMBEDDED
45712 help
45713 Various /proc files exist to monitor process memory utilization:
45714diff -urNp linux-2.6.32.45/fs/proc/kcore.c linux-2.6.32.45/fs/proc/kcore.c
45715--- linux-2.6.32.45/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
45716+++ linux-2.6.32.45/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
45717@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
45718 off_t offset = 0;
45719 struct kcore_list *m;
45720
45721+ pax_track_stack();
45722+
45723 /* setup ELF header */
45724 elf = (struct elfhdr *) bufp;
45725 bufp += sizeof(struct elfhdr);
45726@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
45727 * the addresses in the elf_phdr on our list.
45728 */
45729 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45730- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45731+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45732+ if (tsz > buflen)
45733 tsz = buflen;
45734-
45735+
45736 while (buflen) {
45737 struct kcore_list *m;
45738
45739@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
45740 kfree(elf_buf);
45741 } else {
45742 if (kern_addr_valid(start)) {
45743- unsigned long n;
45744+ char *elf_buf;
45745+ mm_segment_t oldfs;
45746
45747- n = copy_to_user(buffer, (char *)start, tsz);
45748- /*
45749- * We cannot distingush between fault on source
45750- * and fault on destination. When this happens
45751- * we clear too and hope it will trigger the
45752- * EFAULT again.
45753- */
45754- if (n) {
45755- if (clear_user(buffer + tsz - n,
45756- n))
45757+ elf_buf = kmalloc(tsz, GFP_KERNEL);
45758+ if (!elf_buf)
45759+ return -ENOMEM;
45760+ oldfs = get_fs();
45761+ set_fs(KERNEL_DS);
45762+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
45763+ set_fs(oldfs);
45764+ if (copy_to_user(buffer, elf_buf, tsz)) {
45765+ kfree(elf_buf);
45766 return -EFAULT;
45767+ }
45768 }
45769+ set_fs(oldfs);
45770+ kfree(elf_buf);
45771 } else {
45772 if (clear_user(buffer, tsz))
45773 return -EFAULT;
45774@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
45775
45776 static int open_kcore(struct inode *inode, struct file *filp)
45777 {
45778+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
45779+ return -EPERM;
45780+#endif
45781 if (!capable(CAP_SYS_RAWIO))
45782 return -EPERM;
45783 if (kcore_need_update)
45784diff -urNp linux-2.6.32.45/fs/proc/meminfo.c linux-2.6.32.45/fs/proc/meminfo.c
45785--- linux-2.6.32.45/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
45786+++ linux-2.6.32.45/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
45787@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
45788 unsigned long pages[NR_LRU_LISTS];
45789 int lru;
45790
45791+ pax_track_stack();
45792+
45793 /*
45794 * display in kilobytes.
45795 */
45796@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
45797 vmi.used >> 10,
45798 vmi.largest_chunk >> 10
45799 #ifdef CONFIG_MEMORY_FAILURE
45800- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
45801+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
45802 #endif
45803 );
45804
45805diff -urNp linux-2.6.32.45/fs/proc/nommu.c linux-2.6.32.45/fs/proc/nommu.c
45806--- linux-2.6.32.45/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
45807+++ linux-2.6.32.45/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
45808@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
45809 if (len < 1)
45810 len = 1;
45811 seq_printf(m, "%*c", len, ' ');
45812- seq_path(m, &file->f_path, "");
45813+ seq_path(m, &file->f_path, "\n\\");
45814 }
45815
45816 seq_putc(m, '\n');
45817diff -urNp linux-2.6.32.45/fs/proc/proc_net.c linux-2.6.32.45/fs/proc/proc_net.c
45818--- linux-2.6.32.45/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
45819+++ linux-2.6.32.45/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
45820@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
45821 struct task_struct *task;
45822 struct nsproxy *ns;
45823 struct net *net = NULL;
45824+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45825+ const struct cred *cred = current_cred();
45826+#endif
45827+
45828+#ifdef CONFIG_GRKERNSEC_PROC_USER
45829+ if (cred->fsuid)
45830+ return net;
45831+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45832+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
45833+ return net;
45834+#endif
45835
45836 rcu_read_lock();
45837 task = pid_task(proc_pid(dir), PIDTYPE_PID);
45838diff -urNp linux-2.6.32.45/fs/proc/proc_sysctl.c linux-2.6.32.45/fs/proc/proc_sysctl.c
45839--- linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
45840+++ linux-2.6.32.45/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
45841@@ -7,6 +7,8 @@
45842 #include <linux/security.h>
45843 #include "internal.h"
45844
45845+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
45846+
45847 static const struct dentry_operations proc_sys_dentry_operations;
45848 static const struct file_operations proc_sys_file_operations;
45849 static const struct inode_operations proc_sys_inode_operations;
45850@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
45851 if (!p)
45852 goto out;
45853
45854+ if (gr_handle_sysctl(p, MAY_EXEC))
45855+ goto out;
45856+
45857 err = ERR_PTR(-ENOMEM);
45858 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
45859 if (h)
45860@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
45861 if (*pos < file->f_pos)
45862 continue;
45863
45864+ if (gr_handle_sysctl(table, 0))
45865+ continue;
45866+
45867 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
45868 if (res)
45869 return res;
45870@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
45871 if (IS_ERR(head))
45872 return PTR_ERR(head);
45873
45874+ if (table && gr_handle_sysctl(table, MAY_EXEC))
45875+ return -ENOENT;
45876+
45877 generic_fillattr(inode, stat);
45878 if (table)
45879 stat->mode = (stat->mode & S_IFMT) | table->mode;
45880diff -urNp linux-2.6.32.45/fs/proc/root.c linux-2.6.32.45/fs/proc/root.c
45881--- linux-2.6.32.45/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
45882+++ linux-2.6.32.45/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
45883@@ -134,7 +134,15 @@ void __init proc_root_init(void)
45884 #ifdef CONFIG_PROC_DEVICETREE
45885 proc_device_tree_init();
45886 #endif
45887+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45888+#ifdef CONFIG_GRKERNSEC_PROC_USER
45889+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
45890+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45891+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45892+#endif
45893+#else
45894 proc_mkdir("bus", NULL);
45895+#endif
45896 proc_sys_init();
45897 }
45898
45899diff -urNp linux-2.6.32.45/fs/proc/task_mmu.c linux-2.6.32.45/fs/proc/task_mmu.c
45900--- linux-2.6.32.45/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
45901+++ linux-2.6.32.45/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
45902@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
45903 "VmStk:\t%8lu kB\n"
45904 "VmExe:\t%8lu kB\n"
45905 "VmLib:\t%8lu kB\n"
45906- "VmPTE:\t%8lu kB\n",
45907- hiwater_vm << (PAGE_SHIFT-10),
45908+ "VmPTE:\t%8lu kB\n"
45909+
45910+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45911+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
45912+#endif
45913+
45914+ ,hiwater_vm << (PAGE_SHIFT-10),
45915 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
45916 mm->locked_vm << (PAGE_SHIFT-10),
45917 hiwater_rss << (PAGE_SHIFT-10),
45918 total_rss << (PAGE_SHIFT-10),
45919 data << (PAGE_SHIFT-10),
45920 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
45921- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
45922+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
45923+
45924+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45925+ , mm->context.user_cs_base, mm->context.user_cs_limit
45926+#endif
45927+
45928+ );
45929 }
45930
45931 unsigned long task_vsize(struct mm_struct *mm)
45932@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
45933 struct proc_maps_private *priv = m->private;
45934 struct vm_area_struct *vma = v;
45935
45936- vma_stop(priv, vma);
45937+ if (!IS_ERR(vma))
45938+ vma_stop(priv, vma);
45939 if (priv->task)
45940 put_task_struct(priv->task);
45941 }
45942@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
45943 return ret;
45944 }
45945
45946+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45947+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45948+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45949+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45950+#endif
45951+
45952 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
45953 {
45954 struct mm_struct *mm = vma->vm_mm;
45955@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
45956 int flags = vma->vm_flags;
45957 unsigned long ino = 0;
45958 unsigned long long pgoff = 0;
45959- unsigned long start;
45960 dev_t dev = 0;
45961 int len;
45962
45963@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
45964 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
45965 }
45966
45967- /* We don't show the stack guard page in /proc/maps */
45968- start = vma->vm_start;
45969- if (vma->vm_flags & VM_GROWSDOWN)
45970- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
45971- start += PAGE_SIZE;
45972-
45973 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
45974- start,
45975+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45976+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
45977+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
45978+#else
45979+ vma->vm_start,
45980 vma->vm_end,
45981+#endif
45982 flags & VM_READ ? 'r' : '-',
45983 flags & VM_WRITE ? 'w' : '-',
45984 flags & VM_EXEC ? 'x' : '-',
45985 flags & VM_MAYSHARE ? 's' : 'p',
45986+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45987+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
45988+#else
45989 pgoff,
45990+#endif
45991 MAJOR(dev), MINOR(dev), ino, &len);
45992
45993 /*
45994@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
45995 */
45996 if (file) {
45997 pad_len_spaces(m, len);
45998- seq_path(m, &file->f_path, "\n");
45999+ seq_path(m, &file->f_path, "\n\\");
46000 } else {
46001 const char *name = arch_vma_name(vma);
46002 if (!name) {
46003@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
46004 if (vma->vm_start <= mm->brk &&
46005 vma->vm_end >= mm->start_brk) {
46006 name = "[heap]";
46007- } else if (vma->vm_start <= mm->start_stack &&
46008- vma->vm_end >= mm->start_stack) {
46009+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46010+ (vma->vm_start <= mm->start_stack &&
46011+ vma->vm_end >= mm->start_stack)) {
46012 name = "[stack]";
46013 }
46014 } else {
46015@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
46016 };
46017
46018 memset(&mss, 0, sizeof mss);
46019- mss.vma = vma;
46020- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46021- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46022+
46023+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46024+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46025+#endif
46026+ mss.vma = vma;
46027+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46028+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46029+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46030+ }
46031+#endif
46032
46033 show_map_vma(m, vma);
46034
46035@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
46036 "Swap: %8lu kB\n"
46037 "KernelPageSize: %8lu kB\n"
46038 "MMUPageSize: %8lu kB\n",
46039+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46040+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46041+#else
46042 (vma->vm_end - vma->vm_start) >> 10,
46043+#endif
46044 mss.resident >> 10,
46045 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46046 mss.shared_clean >> 10,
46047diff -urNp linux-2.6.32.45/fs/proc/task_nommu.c linux-2.6.32.45/fs/proc/task_nommu.c
46048--- linux-2.6.32.45/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
46049+++ linux-2.6.32.45/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
46050@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
46051 else
46052 bytes += kobjsize(mm);
46053
46054- if (current->fs && current->fs->users > 1)
46055+ if (current->fs && atomic_read(&current->fs->users) > 1)
46056 sbytes += kobjsize(current->fs);
46057 else
46058 bytes += kobjsize(current->fs);
46059@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
46060 if (len < 1)
46061 len = 1;
46062 seq_printf(m, "%*c", len, ' ');
46063- seq_path(m, &file->f_path, "");
46064+ seq_path(m, &file->f_path, "\n\\");
46065 }
46066
46067 seq_putc(m, '\n');
46068diff -urNp linux-2.6.32.45/fs/readdir.c linux-2.6.32.45/fs/readdir.c
46069--- linux-2.6.32.45/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
46070+++ linux-2.6.32.45/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
46071@@ -16,6 +16,7 @@
46072 #include <linux/security.h>
46073 #include <linux/syscalls.h>
46074 #include <linux/unistd.h>
46075+#include <linux/namei.h>
46076
46077 #include <asm/uaccess.h>
46078
46079@@ -67,6 +68,7 @@ struct old_linux_dirent {
46080
46081 struct readdir_callback {
46082 struct old_linux_dirent __user * dirent;
46083+ struct file * file;
46084 int result;
46085 };
46086
46087@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46088 buf->result = -EOVERFLOW;
46089 return -EOVERFLOW;
46090 }
46091+
46092+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46093+ return 0;
46094+
46095 buf->result++;
46096 dirent = buf->dirent;
46097 if (!access_ok(VERIFY_WRITE, dirent,
46098@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46099
46100 buf.result = 0;
46101 buf.dirent = dirent;
46102+ buf.file = file;
46103
46104 error = vfs_readdir(file, fillonedir, &buf);
46105 if (buf.result)
46106@@ -142,6 +149,7 @@ struct linux_dirent {
46107 struct getdents_callback {
46108 struct linux_dirent __user * current_dir;
46109 struct linux_dirent __user * previous;
46110+ struct file * file;
46111 int count;
46112 int error;
46113 };
46114@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
46115 buf->error = -EOVERFLOW;
46116 return -EOVERFLOW;
46117 }
46118+
46119+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46120+ return 0;
46121+
46122 dirent = buf->previous;
46123 if (dirent) {
46124 if (__put_user(offset, &dirent->d_off))
46125@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46126 buf.previous = NULL;
46127 buf.count = count;
46128 buf.error = 0;
46129+ buf.file = file;
46130
46131 error = vfs_readdir(file, filldir, &buf);
46132 if (error >= 0)
46133@@ -228,6 +241,7 @@ out:
46134 struct getdents_callback64 {
46135 struct linux_dirent64 __user * current_dir;
46136 struct linux_dirent64 __user * previous;
46137+ struct file *file;
46138 int count;
46139 int error;
46140 };
46141@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
46142 buf->error = -EINVAL; /* only used if we fail.. */
46143 if (reclen > buf->count)
46144 return -EINVAL;
46145+
46146+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46147+ return 0;
46148+
46149 dirent = buf->previous;
46150 if (dirent) {
46151 if (__put_user(offset, &dirent->d_off))
46152@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46153
46154 buf.current_dir = dirent;
46155 buf.previous = NULL;
46156+ buf.file = file;
46157 buf.count = count;
46158 buf.error = 0;
46159
46160diff -urNp linux-2.6.32.45/fs/reiserfs/dir.c linux-2.6.32.45/fs/reiserfs/dir.c
46161--- linux-2.6.32.45/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
46162+++ linux-2.6.32.45/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
46163@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46164 struct reiserfs_dir_entry de;
46165 int ret = 0;
46166
46167+ pax_track_stack();
46168+
46169 reiserfs_write_lock(inode->i_sb);
46170
46171 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46172diff -urNp linux-2.6.32.45/fs/reiserfs/do_balan.c linux-2.6.32.45/fs/reiserfs/do_balan.c
46173--- linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
46174+++ linux-2.6.32.45/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
46175@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
46176 return;
46177 }
46178
46179- atomic_inc(&(fs_generation(tb->tb_sb)));
46180+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46181 do_balance_starts(tb);
46182
46183 /* balance leaf returns 0 except if combining L R and S into
46184diff -urNp linux-2.6.32.45/fs/reiserfs/item_ops.c linux-2.6.32.45/fs/reiserfs/item_ops.c
46185--- linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
46186+++ linux-2.6.32.45/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
46187@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
46188 vi->vi_index, vi->vi_type, vi->vi_ih);
46189 }
46190
46191-static struct item_operations stat_data_ops = {
46192+static const struct item_operations stat_data_ops = {
46193 .bytes_number = sd_bytes_number,
46194 .decrement_key = sd_decrement_key,
46195 .is_left_mergeable = sd_is_left_mergeable,
46196@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46197 vi->vi_index, vi->vi_type, vi->vi_ih);
46198 }
46199
46200-static struct item_operations direct_ops = {
46201+static const struct item_operations direct_ops = {
46202 .bytes_number = direct_bytes_number,
46203 .decrement_key = direct_decrement_key,
46204 .is_left_mergeable = direct_is_left_mergeable,
46205@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46206 vi->vi_index, vi->vi_type, vi->vi_ih);
46207 }
46208
46209-static struct item_operations indirect_ops = {
46210+static const struct item_operations indirect_ops = {
46211 .bytes_number = indirect_bytes_number,
46212 .decrement_key = indirect_decrement_key,
46213 .is_left_mergeable = indirect_is_left_mergeable,
46214@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46215 printk("\n");
46216 }
46217
46218-static struct item_operations direntry_ops = {
46219+static const struct item_operations direntry_ops = {
46220 .bytes_number = direntry_bytes_number,
46221 .decrement_key = direntry_decrement_key,
46222 .is_left_mergeable = direntry_is_left_mergeable,
46223@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46224 "Invalid item type observed, run fsck ASAP");
46225 }
46226
46227-static struct item_operations errcatch_ops = {
46228+static const struct item_operations errcatch_ops = {
46229 errcatch_bytes_number,
46230 errcatch_decrement_key,
46231 errcatch_is_left_mergeable,
46232@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46233 #error Item types must use disk-format assigned values.
46234 #endif
46235
46236-struct item_operations *item_ops[TYPE_ANY + 1] = {
46237+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46238 &stat_data_ops,
46239 &indirect_ops,
46240 &direct_ops,
46241diff -urNp linux-2.6.32.45/fs/reiserfs/journal.c linux-2.6.32.45/fs/reiserfs/journal.c
46242--- linux-2.6.32.45/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46243+++ linux-2.6.32.45/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46244@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46245 struct buffer_head *bh;
46246 int i, j;
46247
46248+ pax_track_stack();
46249+
46250 bh = __getblk(dev, block, bufsize);
46251 if (buffer_uptodate(bh))
46252 return (bh);
46253diff -urNp linux-2.6.32.45/fs/reiserfs/namei.c linux-2.6.32.45/fs/reiserfs/namei.c
46254--- linux-2.6.32.45/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46255+++ linux-2.6.32.45/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46256@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46257 unsigned long savelink = 1;
46258 struct timespec ctime;
46259
46260+ pax_track_stack();
46261+
46262 /* three balancings: (1) old name removal, (2) new name insertion
46263 and (3) maybe "save" link insertion
46264 stat data updates: (1) old directory,
46265diff -urNp linux-2.6.32.45/fs/reiserfs/procfs.c linux-2.6.32.45/fs/reiserfs/procfs.c
46266--- linux-2.6.32.45/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46267+++ linux-2.6.32.45/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46268@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46269 "SMALL_TAILS " : "NO_TAILS ",
46270 replay_only(sb) ? "REPLAY_ONLY " : "",
46271 convert_reiserfs(sb) ? "CONV " : "",
46272- atomic_read(&r->s_generation_counter),
46273+ atomic_read_unchecked(&r->s_generation_counter),
46274 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46275 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46276 SF(s_good_search_by_key_reada), SF(s_bmaps),
46277@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46278 struct journal_params *jp = &rs->s_v1.s_journal;
46279 char b[BDEVNAME_SIZE];
46280
46281+ pax_track_stack();
46282+
46283 seq_printf(m, /* on-disk fields */
46284 "jp_journal_1st_block: \t%i\n"
46285 "jp_journal_dev: \t%s[%x]\n"
46286diff -urNp linux-2.6.32.45/fs/reiserfs/stree.c linux-2.6.32.45/fs/reiserfs/stree.c
46287--- linux-2.6.32.45/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46288+++ linux-2.6.32.45/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46289@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46290 int iter = 0;
46291 #endif
46292
46293+ pax_track_stack();
46294+
46295 BUG_ON(!th->t_trans_id);
46296
46297 init_tb_struct(th, &s_del_balance, sb, path,
46298@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46299 int retval;
46300 int quota_cut_bytes = 0;
46301
46302+ pax_track_stack();
46303+
46304 BUG_ON(!th->t_trans_id);
46305
46306 le_key2cpu_key(&cpu_key, key);
46307@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46308 int quota_cut_bytes;
46309 loff_t tail_pos = 0;
46310
46311+ pax_track_stack();
46312+
46313 BUG_ON(!th->t_trans_id);
46314
46315 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46316@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46317 int retval;
46318 int fs_gen;
46319
46320+ pax_track_stack();
46321+
46322 BUG_ON(!th->t_trans_id);
46323
46324 fs_gen = get_generation(inode->i_sb);
46325@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46326 int fs_gen = 0;
46327 int quota_bytes = 0;
46328
46329+ pax_track_stack();
46330+
46331 BUG_ON(!th->t_trans_id);
46332
46333 if (inode) { /* Do we count quotas for item? */
46334diff -urNp linux-2.6.32.45/fs/reiserfs/super.c linux-2.6.32.45/fs/reiserfs/super.c
46335--- linux-2.6.32.45/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46336+++ linux-2.6.32.45/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46337@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46338 {.option_name = NULL}
46339 };
46340
46341+ pax_track_stack();
46342+
46343 *blocks = 0;
46344 if (!options || !*options)
46345 /* use default configuration: create tails, journaling on, no
46346diff -urNp linux-2.6.32.45/fs/select.c linux-2.6.32.45/fs/select.c
46347--- linux-2.6.32.45/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46348+++ linux-2.6.32.45/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46349@@ -20,6 +20,7 @@
46350 #include <linux/module.h>
46351 #include <linux/slab.h>
46352 #include <linux/poll.h>
46353+#include <linux/security.h>
46354 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46355 #include <linux/file.h>
46356 #include <linux/fdtable.h>
46357@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46358 int retval, i, timed_out = 0;
46359 unsigned long slack = 0;
46360
46361+ pax_track_stack();
46362+
46363 rcu_read_lock();
46364 retval = max_select_fd(n, fds);
46365 rcu_read_unlock();
46366@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46367 /* Allocate small arguments on the stack to save memory and be faster */
46368 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46369
46370+ pax_track_stack();
46371+
46372 ret = -EINVAL;
46373 if (n < 0)
46374 goto out_nofds;
46375@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46376 struct poll_list *walk = head;
46377 unsigned long todo = nfds;
46378
46379+ pax_track_stack();
46380+
46381+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46382 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46383 return -EINVAL;
46384
46385diff -urNp linux-2.6.32.45/fs/seq_file.c linux-2.6.32.45/fs/seq_file.c
46386--- linux-2.6.32.45/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46387+++ linux-2.6.32.45/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46388@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46389 return 0;
46390 }
46391 if (!m->buf) {
46392- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46393+ m->size = PAGE_SIZE;
46394+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46395 if (!m->buf)
46396 return -ENOMEM;
46397 }
46398@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46399 Eoverflow:
46400 m->op->stop(m, p);
46401 kfree(m->buf);
46402- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46403+ m->size <<= 1;
46404+ m->buf = kmalloc(m->size, GFP_KERNEL);
46405 return !m->buf ? -ENOMEM : -EAGAIN;
46406 }
46407
46408@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46409 m->version = file->f_version;
46410 /* grab buffer if we didn't have one */
46411 if (!m->buf) {
46412- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46413+ m->size = PAGE_SIZE;
46414+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46415 if (!m->buf)
46416 goto Enomem;
46417 }
46418@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46419 goto Fill;
46420 m->op->stop(m, p);
46421 kfree(m->buf);
46422- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46423+ m->size <<= 1;
46424+ m->buf = kmalloc(m->size, GFP_KERNEL);
46425 if (!m->buf)
46426 goto Enomem;
46427 m->count = 0;
46428@@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46429 int res = -ENOMEM;
46430
46431 if (op) {
46432- op->start = single_start;
46433- op->next = single_next;
46434- op->stop = single_stop;
46435- op->show = show;
46436+ *(void **)&op->start = single_start;
46437+ *(void **)&op->next = single_next;
46438+ *(void **)&op->stop = single_stop;
46439+ *(void **)&op->show = show;
46440 res = seq_open(file, op);
46441 if (!res)
46442 ((struct seq_file *)file->private_data)->private = data;
46443diff -urNp linux-2.6.32.45/fs/smbfs/proc.c linux-2.6.32.45/fs/smbfs/proc.c
46444--- linux-2.6.32.45/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46445+++ linux-2.6.32.45/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46446@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46447
46448 out:
46449 if (server->local_nls != NULL && server->remote_nls != NULL)
46450- server->ops->convert = convert_cp;
46451+ *(void **)&server->ops->convert = convert_cp;
46452 else
46453- server->ops->convert = convert_memcpy;
46454+ *(void **)&server->ops->convert = convert_memcpy;
46455
46456 smb_unlock_server(server);
46457 return n;
46458@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46459
46460 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46461 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46462- server->ops->getattr = smb_proc_getattr_core;
46463+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
46464 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46465- server->ops->getattr = smb_proc_getattr_ff;
46466+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46467 }
46468
46469 /* Decode server capabilities */
46470@@ -3439,7 +3439,7 @@ out:
46471 static void
46472 install_ops(struct smb_ops *dst, struct smb_ops *src)
46473 {
46474- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46475+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46476 }
46477
46478 /* < LANMAN2 */
46479diff -urNp linux-2.6.32.45/fs/smbfs/symlink.c linux-2.6.32.45/fs/smbfs/symlink.c
46480--- linux-2.6.32.45/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46481+++ linux-2.6.32.45/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46482@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46483
46484 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46485 {
46486- char *s = nd_get_link(nd);
46487+ const char *s = nd_get_link(nd);
46488 if (!IS_ERR(s))
46489 __putname(s);
46490 }
46491diff -urNp linux-2.6.32.45/fs/splice.c linux-2.6.32.45/fs/splice.c
46492--- linux-2.6.32.45/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46493+++ linux-2.6.32.45/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46494@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46495 pipe_lock(pipe);
46496
46497 for (;;) {
46498- if (!pipe->readers) {
46499+ if (!atomic_read(&pipe->readers)) {
46500 send_sig(SIGPIPE, current, 0);
46501 if (!ret)
46502 ret = -EPIPE;
46503@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46504 do_wakeup = 0;
46505 }
46506
46507- pipe->waiting_writers++;
46508+ atomic_inc(&pipe->waiting_writers);
46509 pipe_wait(pipe);
46510- pipe->waiting_writers--;
46511+ atomic_dec(&pipe->waiting_writers);
46512 }
46513
46514 pipe_unlock(pipe);
46515@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46516 .spd_release = spd_release_page,
46517 };
46518
46519+ pax_track_stack();
46520+
46521 index = *ppos >> PAGE_CACHE_SHIFT;
46522 loff = *ppos & ~PAGE_CACHE_MASK;
46523 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46524@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46525 old_fs = get_fs();
46526 set_fs(get_ds());
46527 /* The cast to a user pointer is valid due to the set_fs() */
46528- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46529+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46530 set_fs(old_fs);
46531
46532 return res;
46533@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46534 old_fs = get_fs();
46535 set_fs(get_ds());
46536 /* The cast to a user pointer is valid due to the set_fs() */
46537- res = vfs_write(file, (const char __user *)buf, count, &pos);
46538+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46539 set_fs(old_fs);
46540
46541 return res;
46542@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46543 .spd_release = spd_release_page,
46544 };
46545
46546+ pax_track_stack();
46547+
46548 index = *ppos >> PAGE_CACHE_SHIFT;
46549 offset = *ppos & ~PAGE_CACHE_MASK;
46550 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46551@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46552 goto err;
46553
46554 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46555- vec[i].iov_base = (void __user *) page_address(page);
46556+ vec[i].iov_base = (__force void __user *) page_address(page);
46557 vec[i].iov_len = this_len;
46558 pages[i] = page;
46559 spd.nr_pages++;
46560@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46561 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46562 {
46563 while (!pipe->nrbufs) {
46564- if (!pipe->writers)
46565+ if (!atomic_read(&pipe->writers))
46566 return 0;
46567
46568- if (!pipe->waiting_writers && sd->num_spliced)
46569+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46570 return 0;
46571
46572 if (sd->flags & SPLICE_F_NONBLOCK)
46573@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46574 * out of the pipe right after the splice_to_pipe(). So set
46575 * PIPE_READERS appropriately.
46576 */
46577- pipe->readers = 1;
46578+ atomic_set(&pipe->readers, 1);
46579
46580 current->splice_pipe = pipe;
46581 }
46582@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46583 .spd_release = spd_release_page,
46584 };
46585
46586+ pax_track_stack();
46587+
46588 pipe = pipe_info(file->f_path.dentry->d_inode);
46589 if (!pipe)
46590 return -EBADF;
46591@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46592 ret = -ERESTARTSYS;
46593 break;
46594 }
46595- if (!pipe->writers)
46596+ if (!atomic_read(&pipe->writers))
46597 break;
46598- if (!pipe->waiting_writers) {
46599+ if (!atomic_read(&pipe->waiting_writers)) {
46600 if (flags & SPLICE_F_NONBLOCK) {
46601 ret = -EAGAIN;
46602 break;
46603@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46604 pipe_lock(pipe);
46605
46606 while (pipe->nrbufs >= PIPE_BUFFERS) {
46607- if (!pipe->readers) {
46608+ if (!atomic_read(&pipe->readers)) {
46609 send_sig(SIGPIPE, current, 0);
46610 ret = -EPIPE;
46611 break;
46612@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46613 ret = -ERESTARTSYS;
46614 break;
46615 }
46616- pipe->waiting_writers++;
46617+ atomic_inc(&pipe->waiting_writers);
46618 pipe_wait(pipe);
46619- pipe->waiting_writers--;
46620+ atomic_dec(&pipe->waiting_writers);
46621 }
46622
46623 pipe_unlock(pipe);
46624@@ -1785,14 +1791,14 @@ retry:
46625 pipe_double_lock(ipipe, opipe);
46626
46627 do {
46628- if (!opipe->readers) {
46629+ if (!atomic_read(&opipe->readers)) {
46630 send_sig(SIGPIPE, current, 0);
46631 if (!ret)
46632 ret = -EPIPE;
46633 break;
46634 }
46635
46636- if (!ipipe->nrbufs && !ipipe->writers)
46637+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46638 break;
46639
46640 /*
46641@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46642 pipe_double_lock(ipipe, opipe);
46643
46644 do {
46645- if (!opipe->readers) {
46646+ if (!atomic_read(&opipe->readers)) {
46647 send_sig(SIGPIPE, current, 0);
46648 if (!ret)
46649 ret = -EPIPE;
46650@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46651 * return EAGAIN if we have the potential of some data in the
46652 * future, otherwise just return 0
46653 */
46654- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46655+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46656 ret = -EAGAIN;
46657
46658 pipe_unlock(ipipe);
46659diff -urNp linux-2.6.32.45/fs/sysfs/file.c linux-2.6.32.45/fs/sysfs/file.c
46660--- linux-2.6.32.45/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46661+++ linux-2.6.32.45/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46662@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46663
46664 struct sysfs_open_dirent {
46665 atomic_t refcnt;
46666- atomic_t event;
46667+ atomic_unchecked_t event;
46668 wait_queue_head_t poll;
46669 struct list_head buffers; /* goes through sysfs_buffer.list */
46670 };
46671@@ -53,7 +53,7 @@ struct sysfs_buffer {
46672 size_t count;
46673 loff_t pos;
46674 char * page;
46675- struct sysfs_ops * ops;
46676+ const struct sysfs_ops * ops;
46677 struct mutex mutex;
46678 int needs_read_fill;
46679 int event;
46680@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46681 {
46682 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46683 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46684- struct sysfs_ops * ops = buffer->ops;
46685+ const struct sysfs_ops * ops = buffer->ops;
46686 int ret = 0;
46687 ssize_t count;
46688
46689@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46690 if (!sysfs_get_active_two(attr_sd))
46691 return -ENODEV;
46692
46693- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46694+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46695 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46696
46697 sysfs_put_active_two(attr_sd);
46698@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46699 {
46700 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46701 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46702- struct sysfs_ops * ops = buffer->ops;
46703+ const struct sysfs_ops * ops = buffer->ops;
46704 int rc;
46705
46706 /* need attr_sd for attr and ops, its parent for kobj */
46707@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
46708 return -ENOMEM;
46709
46710 atomic_set(&new_od->refcnt, 0);
46711- atomic_set(&new_od->event, 1);
46712+ atomic_set_unchecked(&new_od->event, 1);
46713 init_waitqueue_head(&new_od->poll);
46714 INIT_LIST_HEAD(&new_od->buffers);
46715 goto retry;
46716@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
46717 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
46718 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46719 struct sysfs_buffer *buffer;
46720- struct sysfs_ops *ops;
46721+ const struct sysfs_ops *ops;
46722 int error = -EACCES;
46723 char *p;
46724
46725@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
46726
46727 sysfs_put_active_two(attr_sd);
46728
46729- if (buffer->event != atomic_read(&od->event))
46730+ if (buffer->event != atomic_read_unchecked(&od->event))
46731 goto trigger;
46732
46733 return DEFAULT_POLLMASK;
46734@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
46735
46736 od = sd->s_attr.open;
46737 if (od) {
46738- atomic_inc(&od->event);
46739+ atomic_inc_unchecked(&od->event);
46740 wake_up_interruptible(&od->poll);
46741 }
46742
46743diff -urNp linux-2.6.32.45/fs/sysfs/mount.c linux-2.6.32.45/fs/sysfs/mount.c
46744--- linux-2.6.32.45/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
46745+++ linux-2.6.32.45/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
46746@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46747 .s_name = "",
46748 .s_count = ATOMIC_INIT(1),
46749 .s_flags = SYSFS_DIR,
46750+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46751+ .s_mode = S_IFDIR | S_IRWXU,
46752+#else
46753 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46754+#endif
46755 .s_ino = 1,
46756 };
46757
46758diff -urNp linux-2.6.32.45/fs/sysfs/symlink.c linux-2.6.32.45/fs/sysfs/symlink.c
46759--- linux-2.6.32.45/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46760+++ linux-2.6.32.45/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46761@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
46762
46763 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46764 {
46765- char *page = nd_get_link(nd);
46766+ const char *page = nd_get_link(nd);
46767 if (!IS_ERR(page))
46768 free_page((unsigned long)page);
46769 }
46770diff -urNp linux-2.6.32.45/fs/udf/balloc.c linux-2.6.32.45/fs/udf/balloc.c
46771--- linux-2.6.32.45/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
46772+++ linux-2.6.32.45/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
46773@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
46774
46775 mutex_lock(&sbi->s_alloc_mutex);
46776 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46777- if (bloc->logicalBlockNum < 0 ||
46778- (bloc->logicalBlockNum + count) >
46779- partmap->s_partition_len) {
46780+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46781 udf_debug("%d < %d || %d + %d > %d\n",
46782 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
46783 count, partmap->s_partition_len);
46784@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
46785
46786 mutex_lock(&sbi->s_alloc_mutex);
46787 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
46788- if (bloc->logicalBlockNum < 0 ||
46789- (bloc->logicalBlockNum + count) >
46790- partmap->s_partition_len) {
46791+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
46792 udf_debug("%d < %d || %d + %d > %d\n",
46793 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
46794 partmap->s_partition_len);
46795diff -urNp linux-2.6.32.45/fs/udf/inode.c linux-2.6.32.45/fs/udf/inode.c
46796--- linux-2.6.32.45/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
46797+++ linux-2.6.32.45/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
46798@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
46799 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
46800 int lastblock = 0;
46801
46802+ pax_track_stack();
46803+
46804 prev_epos.offset = udf_file_entry_alloc_offset(inode);
46805 prev_epos.block = iinfo->i_location;
46806 prev_epos.bh = NULL;
46807diff -urNp linux-2.6.32.45/fs/udf/misc.c linux-2.6.32.45/fs/udf/misc.c
46808--- linux-2.6.32.45/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
46809+++ linux-2.6.32.45/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
46810@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
46811
46812 u8 udf_tag_checksum(const struct tag *t)
46813 {
46814- u8 *data = (u8 *)t;
46815+ const u8 *data = (const u8 *)t;
46816 u8 checksum = 0;
46817 int i;
46818 for (i = 0; i < sizeof(struct tag); ++i)
46819diff -urNp linux-2.6.32.45/fs/utimes.c linux-2.6.32.45/fs/utimes.c
46820--- linux-2.6.32.45/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
46821+++ linux-2.6.32.45/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
46822@@ -1,6 +1,7 @@
46823 #include <linux/compiler.h>
46824 #include <linux/file.h>
46825 #include <linux/fs.h>
46826+#include <linux/security.h>
46827 #include <linux/linkage.h>
46828 #include <linux/mount.h>
46829 #include <linux/namei.h>
46830@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
46831 goto mnt_drop_write_and_out;
46832 }
46833 }
46834+
46835+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46836+ error = -EACCES;
46837+ goto mnt_drop_write_and_out;
46838+ }
46839+
46840 mutex_lock(&inode->i_mutex);
46841 error = notify_change(path->dentry, &newattrs);
46842 mutex_unlock(&inode->i_mutex);
46843diff -urNp linux-2.6.32.45/fs/xattr_acl.c linux-2.6.32.45/fs/xattr_acl.c
46844--- linux-2.6.32.45/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
46845+++ linux-2.6.32.45/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
46846@@ -17,8 +17,8 @@
46847 struct posix_acl *
46848 posix_acl_from_xattr(const void *value, size_t size)
46849 {
46850- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
46851- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
46852+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
46853+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
46854 int count;
46855 struct posix_acl *acl;
46856 struct posix_acl_entry *acl_e;
46857diff -urNp linux-2.6.32.45/fs/xattr.c linux-2.6.32.45/fs/xattr.c
46858--- linux-2.6.32.45/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
46859+++ linux-2.6.32.45/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
46860@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46861 * Extended attribute SET operations
46862 */
46863 static long
46864-setxattr(struct dentry *d, const char __user *name, const void __user *value,
46865+setxattr(struct path *path, const char __user *name, const void __user *value,
46866 size_t size, int flags)
46867 {
46868 int error;
46869@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
46870 return PTR_ERR(kvalue);
46871 }
46872
46873- error = vfs_setxattr(d, kname, kvalue, size, flags);
46874+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46875+ error = -EACCES;
46876+ goto out;
46877+ }
46878+
46879+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46880+out:
46881 kfree(kvalue);
46882 return error;
46883 }
46884@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
46885 return error;
46886 error = mnt_want_write(path.mnt);
46887 if (!error) {
46888- error = setxattr(path.dentry, name, value, size, flags);
46889+ error = setxattr(&path, name, value, size, flags);
46890 mnt_drop_write(path.mnt);
46891 }
46892 path_put(&path);
46893@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
46894 return error;
46895 error = mnt_want_write(path.mnt);
46896 if (!error) {
46897- error = setxattr(path.dentry, name, value, size, flags);
46898+ error = setxattr(&path, name, value, size, flags);
46899 mnt_drop_write(path.mnt);
46900 }
46901 path_put(&path);
46902@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
46903 const void __user *,value, size_t, size, int, flags)
46904 {
46905 struct file *f;
46906- struct dentry *dentry;
46907 int error = -EBADF;
46908
46909 f = fget(fd);
46910 if (!f)
46911 return error;
46912- dentry = f->f_path.dentry;
46913- audit_inode(NULL, dentry);
46914+ audit_inode(NULL, f->f_path.dentry);
46915 error = mnt_want_write_file(f);
46916 if (!error) {
46917- error = setxattr(dentry, name, value, size, flags);
46918+ error = setxattr(&f->f_path, name, value, size, flags);
46919 mnt_drop_write(f->f_path.mnt);
46920 }
46921 fput(f);
46922diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c
46923--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
46924+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
46925@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
46926 xfs_fsop_geom_t fsgeo;
46927 int error;
46928
46929+ memset(&fsgeo, 0, sizeof(fsgeo));
46930 error = xfs_fs_geometry(mp, &fsgeo, 3);
46931 if (error)
46932 return -error;
46933diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c
46934--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
46935+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
46936@@ -134,7 +134,7 @@ xfs_find_handle(
46937 }
46938
46939 error = -EFAULT;
46940- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
46941+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
46942 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
46943 goto out_put;
46944
46945@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
46946 if (IS_ERR(dentry))
46947 return PTR_ERR(dentry);
46948
46949- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
46950+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
46951 if (!kbuf)
46952 goto out_dput;
46953
46954@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
46955 xfs_mount_t *mp,
46956 void __user *arg)
46957 {
46958- xfs_fsop_geom_t fsgeo;
46959+ xfs_fsop_geom_t fsgeo;
46960 int error;
46961
46962 error = xfs_fs_geometry(mp, &fsgeo, 3);
46963diff -urNp linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c
46964--- linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
46965+++ linux-2.6.32.45/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
46966@@ -468,7 +468,7 @@ xfs_vn_put_link(
46967 struct nameidata *nd,
46968 void *p)
46969 {
46970- char *s = nd_get_link(nd);
46971+ const char *s = nd_get_link(nd);
46972
46973 if (!IS_ERR(s))
46974 kfree(s);
46975diff -urNp linux-2.6.32.45/fs/xfs/xfs_bmap.c linux-2.6.32.45/fs/xfs/xfs_bmap.c
46976--- linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
46977+++ linux-2.6.32.45/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
46978@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
46979 int nmap,
46980 int ret_nmap);
46981 #else
46982-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
46983+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
46984 #endif /* DEBUG */
46985
46986 #if defined(XFS_RW_TRACE)
46987diff -urNp linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c
46988--- linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
46989+++ linux-2.6.32.45/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
46990@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
46991 }
46992
46993 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
46994- if (filldir(dirent, sfep->name, sfep->namelen,
46995+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
46996+ char name[sfep->namelen];
46997+ memcpy(name, sfep->name, sfep->namelen);
46998+ if (filldir(dirent, name, sfep->namelen,
46999+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47000+ *offset = off & 0x7fffffff;
47001+ return 0;
47002+ }
47003+ } else if (filldir(dirent, sfep->name, sfep->namelen,
47004 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47005 *offset = off & 0x7fffffff;
47006 return 0;
47007diff -urNp linux-2.6.32.45/grsecurity/gracl_alloc.c linux-2.6.32.45/grsecurity/gracl_alloc.c
47008--- linux-2.6.32.45/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47009+++ linux-2.6.32.45/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
47010@@ -0,0 +1,105 @@
47011+#include <linux/kernel.h>
47012+#include <linux/mm.h>
47013+#include <linux/slab.h>
47014+#include <linux/vmalloc.h>
47015+#include <linux/gracl.h>
47016+#include <linux/grsecurity.h>
47017+
47018+static unsigned long alloc_stack_next = 1;
47019+static unsigned long alloc_stack_size = 1;
47020+static void **alloc_stack;
47021+
47022+static __inline__ int
47023+alloc_pop(void)
47024+{
47025+ if (alloc_stack_next == 1)
47026+ return 0;
47027+
47028+ kfree(alloc_stack[alloc_stack_next - 2]);
47029+
47030+ alloc_stack_next--;
47031+
47032+ return 1;
47033+}
47034+
47035+static __inline__ int
47036+alloc_push(void *buf)
47037+{
47038+ if (alloc_stack_next >= alloc_stack_size)
47039+ return 1;
47040+
47041+ alloc_stack[alloc_stack_next - 1] = buf;
47042+
47043+ alloc_stack_next++;
47044+
47045+ return 0;
47046+}
47047+
47048+void *
47049+acl_alloc(unsigned long len)
47050+{
47051+ void *ret = NULL;
47052+
47053+ if (!len || len > PAGE_SIZE)
47054+ goto out;
47055+
47056+ ret = kmalloc(len, GFP_KERNEL);
47057+
47058+ if (ret) {
47059+ if (alloc_push(ret)) {
47060+ kfree(ret);
47061+ ret = NULL;
47062+ }
47063+ }
47064+
47065+out:
47066+ return ret;
47067+}
47068+
47069+void *
47070+acl_alloc_num(unsigned long num, unsigned long len)
47071+{
47072+ if (!len || (num > (PAGE_SIZE / len)))
47073+ return NULL;
47074+
47075+ return acl_alloc(num * len);
47076+}
47077+
47078+void
47079+acl_free_all(void)
47080+{
47081+ if (gr_acl_is_enabled() || !alloc_stack)
47082+ return;
47083+
47084+ while (alloc_pop()) ;
47085+
47086+ if (alloc_stack) {
47087+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47088+ kfree(alloc_stack);
47089+ else
47090+ vfree(alloc_stack);
47091+ }
47092+
47093+ alloc_stack = NULL;
47094+ alloc_stack_size = 1;
47095+ alloc_stack_next = 1;
47096+
47097+ return;
47098+}
47099+
47100+int
47101+acl_alloc_stack_init(unsigned long size)
47102+{
47103+ if ((size * sizeof (void *)) <= PAGE_SIZE)
47104+ alloc_stack =
47105+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47106+ else
47107+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
47108+
47109+ alloc_stack_size = size;
47110+
47111+ if (!alloc_stack)
47112+ return 0;
47113+ else
47114+ return 1;
47115+}
47116diff -urNp linux-2.6.32.45/grsecurity/gracl.c linux-2.6.32.45/grsecurity/gracl.c
47117--- linux-2.6.32.45/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47118+++ linux-2.6.32.45/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
47119@@ -0,0 +1,4082 @@
47120+#include <linux/kernel.h>
47121+#include <linux/module.h>
47122+#include <linux/sched.h>
47123+#include <linux/mm.h>
47124+#include <linux/file.h>
47125+#include <linux/fs.h>
47126+#include <linux/namei.h>
47127+#include <linux/mount.h>
47128+#include <linux/tty.h>
47129+#include <linux/proc_fs.h>
47130+#include <linux/smp_lock.h>
47131+#include <linux/slab.h>
47132+#include <linux/vmalloc.h>
47133+#include <linux/types.h>
47134+#include <linux/sysctl.h>
47135+#include <linux/netdevice.h>
47136+#include <linux/ptrace.h>
47137+#include <linux/gracl.h>
47138+#include <linux/gralloc.h>
47139+#include <linux/grsecurity.h>
47140+#include <linux/grinternal.h>
47141+#include <linux/pid_namespace.h>
47142+#include <linux/fdtable.h>
47143+#include <linux/percpu.h>
47144+
47145+#include <asm/uaccess.h>
47146+#include <asm/errno.h>
47147+#include <asm/mman.h>
47148+
47149+static struct acl_role_db acl_role_set;
47150+static struct name_db name_set;
47151+static struct inodev_db inodev_set;
47152+
47153+/* for keeping track of userspace pointers used for subjects, so we
47154+ can share references in the kernel as well
47155+*/
47156+
47157+static struct dentry *real_root;
47158+static struct vfsmount *real_root_mnt;
47159+
47160+static struct acl_subj_map_db subj_map_set;
47161+
47162+static struct acl_role_label *default_role;
47163+
47164+static struct acl_role_label *role_list;
47165+
47166+static u16 acl_sp_role_value;
47167+
47168+extern char *gr_shared_page[4];
47169+static DEFINE_MUTEX(gr_dev_mutex);
47170+DEFINE_RWLOCK(gr_inode_lock);
47171+
47172+struct gr_arg *gr_usermode;
47173+
47174+static unsigned int gr_status __read_only = GR_STATUS_INIT;
47175+
47176+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47177+extern void gr_clear_learn_entries(void);
47178+
47179+#ifdef CONFIG_GRKERNSEC_RESLOG
47180+extern void gr_log_resource(const struct task_struct *task,
47181+ const int res, const unsigned long wanted, const int gt);
47182+#endif
47183+
47184+unsigned char *gr_system_salt;
47185+unsigned char *gr_system_sum;
47186+
47187+static struct sprole_pw **acl_special_roles = NULL;
47188+static __u16 num_sprole_pws = 0;
47189+
47190+static struct acl_role_label *kernel_role = NULL;
47191+
47192+static unsigned int gr_auth_attempts = 0;
47193+static unsigned long gr_auth_expires = 0UL;
47194+
47195+#ifdef CONFIG_NET
47196+extern struct vfsmount *sock_mnt;
47197+#endif
47198+extern struct vfsmount *pipe_mnt;
47199+extern struct vfsmount *shm_mnt;
47200+#ifdef CONFIG_HUGETLBFS
47201+extern struct vfsmount *hugetlbfs_vfsmount;
47202+#endif
47203+
47204+static struct acl_object_label *fakefs_obj_rw;
47205+static struct acl_object_label *fakefs_obj_rwx;
47206+
47207+extern int gr_init_uidset(void);
47208+extern void gr_free_uidset(void);
47209+extern void gr_remove_uid(uid_t uid);
47210+extern int gr_find_uid(uid_t uid);
47211+
47212+__inline__ int
47213+gr_acl_is_enabled(void)
47214+{
47215+ return (gr_status & GR_READY);
47216+}
47217+
47218+#ifdef CONFIG_BTRFS_FS
47219+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47220+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47221+#endif
47222+
47223+static inline dev_t __get_dev(const struct dentry *dentry)
47224+{
47225+#ifdef CONFIG_BTRFS_FS
47226+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47227+ return get_btrfs_dev_from_inode(dentry->d_inode);
47228+ else
47229+#endif
47230+ return dentry->d_inode->i_sb->s_dev;
47231+}
47232+
47233+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47234+{
47235+ return __get_dev(dentry);
47236+}
47237+
47238+static char gr_task_roletype_to_char(struct task_struct *task)
47239+{
47240+ switch (task->role->roletype &
47241+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47242+ GR_ROLE_SPECIAL)) {
47243+ case GR_ROLE_DEFAULT:
47244+ return 'D';
47245+ case GR_ROLE_USER:
47246+ return 'U';
47247+ case GR_ROLE_GROUP:
47248+ return 'G';
47249+ case GR_ROLE_SPECIAL:
47250+ return 'S';
47251+ }
47252+
47253+ return 'X';
47254+}
47255+
47256+char gr_roletype_to_char(void)
47257+{
47258+ return gr_task_roletype_to_char(current);
47259+}
47260+
47261+__inline__ int
47262+gr_acl_tpe_check(void)
47263+{
47264+ if (unlikely(!(gr_status & GR_READY)))
47265+ return 0;
47266+ if (current->role->roletype & GR_ROLE_TPE)
47267+ return 1;
47268+ else
47269+ return 0;
47270+}
47271+
47272+int
47273+gr_handle_rawio(const struct inode *inode)
47274+{
47275+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47276+ if (inode && S_ISBLK(inode->i_mode) &&
47277+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47278+ !capable(CAP_SYS_RAWIO))
47279+ return 1;
47280+#endif
47281+ return 0;
47282+}
47283+
47284+static int
47285+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47286+{
47287+ if (likely(lena != lenb))
47288+ return 0;
47289+
47290+ return !memcmp(a, b, lena);
47291+}
47292+
47293+/* this must be called with vfsmount_lock and dcache_lock held */
47294+
47295+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47296+ struct dentry *root, struct vfsmount *rootmnt,
47297+ char *buffer, int buflen)
47298+{
47299+ char * end = buffer+buflen;
47300+ char * retval;
47301+ int namelen;
47302+
47303+ *--end = '\0';
47304+ buflen--;
47305+
47306+ if (buflen < 1)
47307+ goto Elong;
47308+ /* Get '/' right */
47309+ retval = end-1;
47310+ *retval = '/';
47311+
47312+ for (;;) {
47313+ struct dentry * parent;
47314+
47315+ if (dentry == root && vfsmnt == rootmnt)
47316+ break;
47317+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47318+ /* Global root? */
47319+ if (vfsmnt->mnt_parent == vfsmnt)
47320+ goto global_root;
47321+ dentry = vfsmnt->mnt_mountpoint;
47322+ vfsmnt = vfsmnt->mnt_parent;
47323+ continue;
47324+ }
47325+ parent = dentry->d_parent;
47326+ prefetch(parent);
47327+ namelen = dentry->d_name.len;
47328+ buflen -= namelen + 1;
47329+ if (buflen < 0)
47330+ goto Elong;
47331+ end -= namelen;
47332+ memcpy(end, dentry->d_name.name, namelen);
47333+ *--end = '/';
47334+ retval = end;
47335+ dentry = parent;
47336+ }
47337+
47338+out:
47339+ return retval;
47340+
47341+global_root:
47342+ namelen = dentry->d_name.len;
47343+ buflen -= namelen;
47344+ if (buflen < 0)
47345+ goto Elong;
47346+ retval -= namelen-1; /* hit the slash */
47347+ memcpy(retval, dentry->d_name.name, namelen);
47348+ goto out;
47349+Elong:
47350+ retval = ERR_PTR(-ENAMETOOLONG);
47351+ goto out;
47352+}
47353+
47354+static char *
47355+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47356+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47357+{
47358+ char *retval;
47359+
47360+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47361+ if (unlikely(IS_ERR(retval)))
47362+ retval = strcpy(buf, "<path too long>");
47363+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47364+ retval[1] = '\0';
47365+
47366+ return retval;
47367+}
47368+
47369+static char *
47370+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47371+ char *buf, int buflen)
47372+{
47373+ char *res;
47374+
47375+ /* we can use real_root, real_root_mnt, because this is only called
47376+ by the RBAC system */
47377+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47378+
47379+ return res;
47380+}
47381+
47382+static char *
47383+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47384+ char *buf, int buflen)
47385+{
47386+ char *res;
47387+ struct dentry *root;
47388+ struct vfsmount *rootmnt;
47389+ struct task_struct *reaper = &init_task;
47390+
47391+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47392+ read_lock(&reaper->fs->lock);
47393+ root = dget(reaper->fs->root.dentry);
47394+ rootmnt = mntget(reaper->fs->root.mnt);
47395+ read_unlock(&reaper->fs->lock);
47396+
47397+ spin_lock(&dcache_lock);
47398+ spin_lock(&vfsmount_lock);
47399+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47400+ spin_unlock(&vfsmount_lock);
47401+ spin_unlock(&dcache_lock);
47402+
47403+ dput(root);
47404+ mntput(rootmnt);
47405+ return res;
47406+}
47407+
47408+static char *
47409+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47410+{
47411+ char *ret;
47412+ spin_lock(&dcache_lock);
47413+ spin_lock(&vfsmount_lock);
47414+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47415+ PAGE_SIZE);
47416+ spin_unlock(&vfsmount_lock);
47417+ spin_unlock(&dcache_lock);
47418+ return ret;
47419+}
47420+
47421+char *
47422+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47423+{
47424+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47425+ PAGE_SIZE);
47426+}
47427+
47428+char *
47429+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47430+{
47431+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47432+ PAGE_SIZE);
47433+}
47434+
47435+char *
47436+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47437+{
47438+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47439+ PAGE_SIZE);
47440+}
47441+
47442+char *
47443+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47444+{
47445+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47446+ PAGE_SIZE);
47447+}
47448+
47449+char *
47450+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47451+{
47452+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47453+ PAGE_SIZE);
47454+}
47455+
47456+__inline__ __u32
47457+to_gr_audit(const __u32 reqmode)
47458+{
47459+ /* masks off auditable permission flags, then shifts them to create
47460+ auditing flags, and adds the special case of append auditing if
47461+ we're requesting write */
47462+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47463+}
47464+
47465+struct acl_subject_label *
47466+lookup_subject_map(const struct acl_subject_label *userp)
47467+{
47468+ unsigned int index = shash(userp, subj_map_set.s_size);
47469+ struct subject_map *match;
47470+
47471+ match = subj_map_set.s_hash[index];
47472+
47473+ while (match && match->user != userp)
47474+ match = match->next;
47475+
47476+ if (match != NULL)
47477+ return match->kernel;
47478+ else
47479+ return NULL;
47480+}
47481+
47482+static void
47483+insert_subj_map_entry(struct subject_map *subjmap)
47484+{
47485+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47486+ struct subject_map **curr;
47487+
47488+ subjmap->prev = NULL;
47489+
47490+ curr = &subj_map_set.s_hash[index];
47491+ if (*curr != NULL)
47492+ (*curr)->prev = subjmap;
47493+
47494+ subjmap->next = *curr;
47495+ *curr = subjmap;
47496+
47497+ return;
47498+}
47499+
47500+static struct acl_role_label *
47501+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47502+ const gid_t gid)
47503+{
47504+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47505+ struct acl_role_label *match;
47506+ struct role_allowed_ip *ipp;
47507+ unsigned int x;
47508+ u32 curr_ip = task->signal->curr_ip;
47509+
47510+ task->signal->saved_ip = curr_ip;
47511+
47512+ match = acl_role_set.r_hash[index];
47513+
47514+ while (match) {
47515+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47516+ for (x = 0; x < match->domain_child_num; x++) {
47517+ if (match->domain_children[x] == uid)
47518+ goto found;
47519+ }
47520+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47521+ break;
47522+ match = match->next;
47523+ }
47524+found:
47525+ if (match == NULL) {
47526+ try_group:
47527+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47528+ match = acl_role_set.r_hash[index];
47529+
47530+ while (match) {
47531+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47532+ for (x = 0; x < match->domain_child_num; x++) {
47533+ if (match->domain_children[x] == gid)
47534+ goto found2;
47535+ }
47536+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47537+ break;
47538+ match = match->next;
47539+ }
47540+found2:
47541+ if (match == NULL)
47542+ match = default_role;
47543+ if (match->allowed_ips == NULL)
47544+ return match;
47545+ else {
47546+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47547+ if (likely
47548+ ((ntohl(curr_ip) & ipp->netmask) ==
47549+ (ntohl(ipp->addr) & ipp->netmask)))
47550+ return match;
47551+ }
47552+ match = default_role;
47553+ }
47554+ } else if (match->allowed_ips == NULL) {
47555+ return match;
47556+ } else {
47557+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47558+ if (likely
47559+ ((ntohl(curr_ip) & ipp->netmask) ==
47560+ (ntohl(ipp->addr) & ipp->netmask)))
47561+ return match;
47562+ }
47563+ goto try_group;
47564+ }
47565+
47566+ return match;
47567+}
47568+
47569+struct acl_subject_label *
47570+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47571+ const struct acl_role_label *role)
47572+{
47573+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47574+ struct acl_subject_label *match;
47575+
47576+ match = role->subj_hash[index];
47577+
47578+ while (match && (match->inode != ino || match->device != dev ||
47579+ (match->mode & GR_DELETED))) {
47580+ match = match->next;
47581+ }
47582+
47583+ if (match && !(match->mode & GR_DELETED))
47584+ return match;
47585+ else
47586+ return NULL;
47587+}
47588+
47589+struct acl_subject_label *
47590+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47591+ const struct acl_role_label *role)
47592+{
47593+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47594+ struct acl_subject_label *match;
47595+
47596+ match = role->subj_hash[index];
47597+
47598+ while (match && (match->inode != ino || match->device != dev ||
47599+ !(match->mode & GR_DELETED))) {
47600+ match = match->next;
47601+ }
47602+
47603+ if (match && (match->mode & GR_DELETED))
47604+ return match;
47605+ else
47606+ return NULL;
47607+}
47608+
47609+static struct acl_object_label *
47610+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47611+ const struct acl_subject_label *subj)
47612+{
47613+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47614+ struct acl_object_label *match;
47615+
47616+ match = subj->obj_hash[index];
47617+
47618+ while (match && (match->inode != ino || match->device != dev ||
47619+ (match->mode & GR_DELETED))) {
47620+ match = match->next;
47621+ }
47622+
47623+ if (match && !(match->mode & GR_DELETED))
47624+ return match;
47625+ else
47626+ return NULL;
47627+}
47628+
47629+static struct acl_object_label *
47630+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47631+ const struct acl_subject_label *subj)
47632+{
47633+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47634+ struct acl_object_label *match;
47635+
47636+ match = subj->obj_hash[index];
47637+
47638+ while (match && (match->inode != ino || match->device != dev ||
47639+ !(match->mode & GR_DELETED))) {
47640+ match = match->next;
47641+ }
47642+
47643+ if (match && (match->mode & GR_DELETED))
47644+ return match;
47645+
47646+ match = subj->obj_hash[index];
47647+
47648+ while (match && (match->inode != ino || match->device != dev ||
47649+ (match->mode & GR_DELETED))) {
47650+ match = match->next;
47651+ }
47652+
47653+ if (match && !(match->mode & GR_DELETED))
47654+ return match;
47655+ else
47656+ return NULL;
47657+}
47658+
47659+static struct name_entry *
47660+lookup_name_entry(const char *name)
47661+{
47662+ unsigned int len = strlen(name);
47663+ unsigned int key = full_name_hash(name, len);
47664+ unsigned int index = key % name_set.n_size;
47665+ struct name_entry *match;
47666+
47667+ match = name_set.n_hash[index];
47668+
47669+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47670+ match = match->next;
47671+
47672+ return match;
47673+}
47674+
47675+static struct name_entry *
47676+lookup_name_entry_create(const char *name)
47677+{
47678+ unsigned int len = strlen(name);
47679+ unsigned int key = full_name_hash(name, len);
47680+ unsigned int index = key % name_set.n_size;
47681+ struct name_entry *match;
47682+
47683+ match = name_set.n_hash[index];
47684+
47685+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47686+ !match->deleted))
47687+ match = match->next;
47688+
47689+ if (match && match->deleted)
47690+ return match;
47691+
47692+ match = name_set.n_hash[index];
47693+
47694+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47695+ match->deleted))
47696+ match = match->next;
47697+
47698+ if (match && !match->deleted)
47699+ return match;
47700+ else
47701+ return NULL;
47702+}
47703+
47704+static struct inodev_entry *
47705+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47706+{
47707+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
47708+ struct inodev_entry *match;
47709+
47710+ match = inodev_set.i_hash[index];
47711+
47712+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
47713+ match = match->next;
47714+
47715+ return match;
47716+}
47717+
47718+static void
47719+insert_inodev_entry(struct inodev_entry *entry)
47720+{
47721+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
47722+ inodev_set.i_size);
47723+ struct inodev_entry **curr;
47724+
47725+ entry->prev = NULL;
47726+
47727+ curr = &inodev_set.i_hash[index];
47728+ if (*curr != NULL)
47729+ (*curr)->prev = entry;
47730+
47731+ entry->next = *curr;
47732+ *curr = entry;
47733+
47734+ return;
47735+}
47736+
47737+static void
47738+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
47739+{
47740+ unsigned int index =
47741+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
47742+ struct acl_role_label **curr;
47743+ struct acl_role_label *tmp;
47744+
47745+ curr = &acl_role_set.r_hash[index];
47746+
47747+	/* if this role was already inserted due to domains and already
47748+	   has other roles chained behind it from that bucket, then we
47749+	   need to combine the two bucket chains
47750+	*/
47751+ if (role->next) {
47752+ tmp = role->next;
47753+ while (tmp->next)
47754+ tmp = tmp->next;
47755+ tmp->next = *curr;
47756+ } else
47757+ role->next = *curr;
47758+ *curr = role;
47759+
47760+ return;
47761+}
47762+
47763+static void
47764+insert_acl_role_label(struct acl_role_label *role)
47765+{
47766+ int i;
47767+
47768+ if (role_list == NULL) {
47769+ role_list = role;
47770+ role->prev = NULL;
47771+ } else {
47772+ role->prev = role_list;
47773+ role_list = role;
47774+ }
47775+
47776+ /* used for hash chains */
47777+ role->next = NULL;
47778+
47779+ if (role->roletype & GR_ROLE_DOMAIN) {
47780+ for (i = 0; i < role->domain_child_num; i++)
47781+ __insert_acl_role_label(role, role->domain_children[i]);
47782+ } else
47783+ __insert_acl_role_label(role, role->uidgid);
47784+}
47785+
47786+static int
47787+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
47788+{
47789+ struct name_entry **curr, *nentry;
47790+ struct inodev_entry *ientry;
47791+ unsigned int len = strlen(name);
47792+ unsigned int key = full_name_hash(name, len);
47793+ unsigned int index = key % name_set.n_size;
47794+
47795+ curr = &name_set.n_hash[index];
47796+
47797+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
47798+ curr = &((*curr)->next);
47799+
47800+ if (*curr != NULL)
47801+ return 1;
47802+
47803+ nentry = acl_alloc(sizeof (struct name_entry));
47804+ if (nentry == NULL)
47805+ return 0;
47806+ ientry = acl_alloc(sizeof (struct inodev_entry));
47807+ if (ientry == NULL)
47808+ return 0;
47809+ ientry->nentry = nentry;
47810+
47811+ nentry->key = key;
47812+ nentry->name = name;
47813+ nentry->inode = inode;
47814+ nentry->device = device;
47815+ nentry->len = len;
47816+ nentry->deleted = deleted;
47817+
47818+ nentry->prev = NULL;
47819+ curr = &name_set.n_hash[index];
47820+ if (*curr != NULL)
47821+ (*curr)->prev = nentry;
47822+ nentry->next = *curr;
47823+ *curr = nentry;
47824+
47825+ /* insert us into the table searchable by inode/dev */
47826+ insert_inodev_entry(ientry);
47827+
47828+ return 1;
47829+}
47830+
47831+static void
47832+insert_acl_obj_label(struct acl_object_label *obj,
47833+ struct acl_subject_label *subj)
47834+{
47835+ unsigned int index =
47836+ fhash(obj->inode, obj->device, subj->obj_hash_size);
47837+ struct acl_object_label **curr;
47838+
47839+
47840+ obj->prev = NULL;
47841+
47842+ curr = &subj->obj_hash[index];
47843+ if (*curr != NULL)
47844+ (*curr)->prev = obj;
47845+
47846+ obj->next = *curr;
47847+ *curr = obj;
47848+
47849+ return;
47850+}
47851+
47852+static void
47853+insert_acl_subj_label(struct acl_subject_label *obj,
47854+ struct acl_role_label *role)
47855+{
47856+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
47857+ struct acl_subject_label **curr;
47858+
47859+ obj->prev = NULL;
47860+
47861+ curr = &role->subj_hash[index];
47862+ if (*curr != NULL)
47863+ (*curr)->prev = obj;
47864+
47865+ obj->next = *curr;
47866+ *curr = obj;
47867+
47868+ return;
47869+}
47870+
47871+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
47872+
47873+static void *
47874+create_table(__u32 * len, int elementsize)
47875+{
47876+ unsigned int table_sizes[] = {
47877+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
47878+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
47879+ 4194301, 8388593, 16777213, 33554393, 67108859
47880+ };
47881+ void *newtable = NULL;
47882+ unsigned int pwr = 0;
47883+
47884+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
47885+ table_sizes[pwr] <= *len)
47886+ pwr++;
47887+
47888+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
47889+ return newtable;
47890+
47891+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
47892+ newtable =
47893+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
47894+ else
47895+ newtable = vmalloc(table_sizes[pwr] * elementsize);
47896+
47897+ *len = table_sizes[pwr];
47898+
47899+ return newtable;
47900+}
47901+
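create_table() sizes each chained hash table by walking a fixed list of primes until it finds one larger than the requested element count, which keeps the expected chain length (the load factor lambda) near 1, and it falls back from kmalloc to vmalloc once the table no longer fits in a page. A user-space sketch of just the size selection (same prime list; the overflow and allocation details above are omitted):

#include <stddef.h>

/* smallest prime strictly greater than len, as create_table() picks;
   returns 0 when len exceeds the largest supported size */
static unsigned int pick_table_size(unsigned int len)
{
	static const unsigned int primes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
		4194301, 8388593, 16777213, 33554393, 67108859
	};
	unsigned int pwr = 0;

	while (pwr < (sizeof(primes) / sizeof(primes[0])) - 1 &&
	       primes[pwr] <= len)
		pwr++;

	return primes[pwr] > len ? primes[pwr] : 0;
}
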
47902+static int
47903+init_variables(const struct gr_arg *arg)
47904+{
47905+ struct task_struct *reaper = &init_task;
47906+ unsigned int stacksize;
47907+
47908+ subj_map_set.s_size = arg->role_db.num_subjects;
47909+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
47910+ name_set.n_size = arg->role_db.num_objects;
47911+ inodev_set.i_size = arg->role_db.num_objects;
47912+
47913+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
47914+ !name_set.n_size || !inodev_set.i_size)
47915+ return 1;
47916+
47917+ if (!gr_init_uidset())
47918+ return 1;
47919+
47920+ /* set up the stack that holds allocation info */
47921+
47922+ stacksize = arg->role_db.num_pointers + 5;
47923+
47924+ if (!acl_alloc_stack_init(stacksize))
47925+ return 1;
47926+
47927+ /* grab reference for the real root dentry and vfsmount */
47928+ read_lock(&reaper->fs->lock);
47929+ real_root = dget(reaper->fs->root.dentry);
47930+ real_root_mnt = mntget(reaper->fs->root.mnt);
47931+ read_unlock(&reaper->fs->lock);
47932+
47933+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47934+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
47935+#endif
47936+
47937+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
47938+ if (fakefs_obj_rw == NULL)
47939+ return 1;
47940+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
47941+
47942+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
47943+ if (fakefs_obj_rwx == NULL)
47944+ return 1;
47945+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
47946+
47947+ subj_map_set.s_hash =
47948+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
47949+ acl_role_set.r_hash =
47950+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
47951+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
47952+ inodev_set.i_hash =
47953+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
47954+
47955+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
47956+ !name_set.n_hash || !inodev_set.i_hash)
47957+ return 1;
47958+
47959+ memset(subj_map_set.s_hash, 0,
47960+ sizeof(struct subject_map *) * subj_map_set.s_size);
47961+ memset(acl_role_set.r_hash, 0,
47962+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
47963+ memset(name_set.n_hash, 0,
47964+ sizeof (struct name_entry *) * name_set.n_size);
47965+ memset(inodev_set.i_hash, 0,
47966+ sizeof (struct inodev_entry *) * inodev_set.i_size);
47967+
47968+ return 0;
47969+}
47970+
47971+/* free information not needed after startup
47972+ currently contains user->kernel pointer mappings for subjects
47973+*/
47974+
47975+static void
47976+free_init_variables(void)
47977+{
47978+ __u32 i;
47979+
47980+ if (subj_map_set.s_hash) {
47981+ for (i = 0; i < subj_map_set.s_size; i++) {
47982+ if (subj_map_set.s_hash[i]) {
47983+ kfree(subj_map_set.s_hash[i]);
47984+ subj_map_set.s_hash[i] = NULL;
47985+ }
47986+ }
47987+
47988+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
47989+ PAGE_SIZE)
47990+ kfree(subj_map_set.s_hash);
47991+ else
47992+ vfree(subj_map_set.s_hash);
47993+ }
47994+
47995+ return;
47996+}
47997+
47998+static void
47999+free_variables(void)
48000+{
48001+ struct acl_subject_label *s;
48002+ struct acl_role_label *r;
48003+ struct task_struct *task, *task2;
48004+ unsigned int x;
48005+
48006+ gr_clear_learn_entries();
48007+
48008+ read_lock(&tasklist_lock);
48009+ do_each_thread(task2, task) {
48010+ task->acl_sp_role = 0;
48011+ task->acl_role_id = 0;
48012+ task->acl = NULL;
48013+ task->role = NULL;
48014+ } while_each_thread(task2, task);
48015+ read_unlock(&tasklist_lock);
48016+
48017+ /* release the reference to the real root dentry and vfsmount */
48018+ if (real_root)
48019+ dput(real_root);
48020+ real_root = NULL;
48021+ if (real_root_mnt)
48022+ mntput(real_root_mnt);
48023+ real_root_mnt = NULL;
48024+
48025+ /* free all object hash tables */
48026+
48027+ FOR_EACH_ROLE_START(r)
48028+ if (r->subj_hash == NULL)
48029+ goto next_role;
48030+ FOR_EACH_SUBJECT_START(r, s, x)
48031+ if (s->obj_hash == NULL)
48032+ break;
48033+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48034+ kfree(s->obj_hash);
48035+ else
48036+ vfree(s->obj_hash);
48037+ FOR_EACH_SUBJECT_END(s, x)
48038+ FOR_EACH_NESTED_SUBJECT_START(r, s)
48039+ if (s->obj_hash == NULL)
48040+ break;
48041+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48042+ kfree(s->obj_hash);
48043+ else
48044+ vfree(s->obj_hash);
48045+ FOR_EACH_NESTED_SUBJECT_END(s)
48046+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48047+ kfree(r->subj_hash);
48048+ else
48049+ vfree(r->subj_hash);
48050+ r->subj_hash = NULL;
48051+next_role:
48052+ FOR_EACH_ROLE_END(r)
48053+
48054+ acl_free_all();
48055+
48056+ if (acl_role_set.r_hash) {
48057+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48058+ PAGE_SIZE)
48059+ kfree(acl_role_set.r_hash);
48060+ else
48061+ vfree(acl_role_set.r_hash);
48062+ }
48063+ if (name_set.n_hash) {
48064+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48065+ PAGE_SIZE)
48066+ kfree(name_set.n_hash);
48067+ else
48068+ vfree(name_set.n_hash);
48069+ }
48070+
48071+ if (inodev_set.i_hash) {
48072+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48073+ PAGE_SIZE)
48074+ kfree(inodev_set.i_hash);
48075+ else
48076+ vfree(inodev_set.i_hash);
48077+ }
48078+
48079+ gr_free_uidset();
48080+
48081+ memset(&name_set, 0, sizeof (struct name_db));
48082+ memset(&inodev_set, 0, sizeof (struct inodev_db));
48083+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48084+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48085+
48086+ default_role = NULL;
48087+ role_list = NULL;
48088+
48089+ return;
48090+}
48091+
48092+static __u32
48093+count_user_objs(struct acl_object_label *userp)
48094+{
48095+ struct acl_object_label o_tmp;
48096+ __u32 num = 0;
48097+
48098+ while (userp) {
48099+ if (copy_from_user(&o_tmp, userp,
48100+ sizeof (struct acl_object_label)))
48101+ break;
48102+
48103+ userp = o_tmp.prev;
48104+ num++;
48105+ }
48106+
48107+ return num;
48108+}
48109+
48110+static struct acl_subject_label *
48111+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48112+
48113+static int
48114+copy_user_glob(struct acl_object_label *obj)
48115+{
48116+ struct acl_object_label *g_tmp, **guser;
48117+ unsigned int len;
48118+ char *tmp;
48119+
48120+ if (obj->globbed == NULL)
48121+ return 0;
48122+
48123+ guser = &obj->globbed;
48124+ while (*guser) {
48125+ g_tmp = (struct acl_object_label *)
48126+ acl_alloc(sizeof (struct acl_object_label));
48127+ if (g_tmp == NULL)
48128+ return -ENOMEM;
48129+
48130+ if (copy_from_user(g_tmp, *guser,
48131+ sizeof (struct acl_object_label)))
48132+ return -EFAULT;
48133+
48134+ len = strnlen_user(g_tmp->filename, PATH_MAX);
48135+
48136+ if (!len || len >= PATH_MAX)
48137+ return -EINVAL;
48138+
48139+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48140+ return -ENOMEM;
48141+
48142+ if (copy_from_user(tmp, g_tmp->filename, len))
48143+ return -EFAULT;
48144+ tmp[len-1] = '\0';
48145+ g_tmp->filename = tmp;
48146+
48147+ *guser = g_tmp;
48148+ guser = &(g_tmp->next);
48149+ }
48150+
48151+ return 0;
48152+}
48153+
48154+static int
48155+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48156+ struct acl_role_label *role)
48157+{
48158+ struct acl_object_label *o_tmp;
48159+ unsigned int len;
48160+ int ret;
48161+ char *tmp;
48162+
48163+ while (userp) {
48164+ if ((o_tmp = (struct acl_object_label *)
48165+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48166+ return -ENOMEM;
48167+
48168+ if (copy_from_user(o_tmp, userp,
48169+ sizeof (struct acl_object_label)))
48170+ return -EFAULT;
48171+
48172+ userp = o_tmp->prev;
48173+
48174+ len = strnlen_user(o_tmp->filename, PATH_MAX);
48175+
48176+ if (!len || len >= PATH_MAX)
48177+ return -EINVAL;
48178+
48179+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48180+ return -ENOMEM;
48181+
48182+ if (copy_from_user(tmp, o_tmp->filename, len))
48183+ return -EFAULT;
48184+ tmp[len-1] = '\0';
48185+ o_tmp->filename = tmp;
48186+
48187+ insert_acl_obj_label(o_tmp, subj);
48188+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48189+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48190+ return -ENOMEM;
48191+
48192+ ret = copy_user_glob(o_tmp);
48193+ if (ret)
48194+ return ret;
48195+
48196+ if (o_tmp->nested) {
48197+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48198+ if (IS_ERR(o_tmp->nested))
48199+ return PTR_ERR(o_tmp->nested);
48200+
48201+ /* insert into nested subject list */
48202+ o_tmp->nested->next = role->hash->first;
48203+ role->hash->first = o_tmp->nested;
48204+ }
48205+ }
48206+
48207+ return 0;
48208+}
48209+
48210+static __u32
48211+count_user_subjs(struct acl_subject_label *userp)
48212+{
48213+ struct acl_subject_label s_tmp;
48214+ __u32 num = 0;
48215+
48216+ while (userp) {
48217+ if (copy_from_user(&s_tmp, userp,
48218+ sizeof (struct acl_subject_label)))
48219+ break;
48220+
48221+ userp = s_tmp.prev;
48222+		/* do not include nested subjects in this count, since
48223+		   they are not part of the hash table, but are
48224+		   attached to objects.  We have already counted
48225+		   the subjects in userspace for the allocation
48226+		   stack
48227+		*/
48228+ if (!(s_tmp.mode & GR_NESTED))
48229+ num++;
48230+ }
48231+
48232+ return num;
48233+}
48234+
48235+static int
48236+copy_user_allowedips(struct acl_role_label *rolep)
48237+{
48238+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48239+
48240+ ruserip = rolep->allowed_ips;
48241+
48242+ while (ruserip) {
48243+ rlast = rtmp;
48244+
48245+ if ((rtmp = (struct role_allowed_ip *)
48246+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48247+ return -ENOMEM;
48248+
48249+ if (copy_from_user(rtmp, ruserip,
48250+ sizeof (struct role_allowed_ip)))
48251+ return -EFAULT;
48252+
48253+ ruserip = rtmp->prev;
48254+
48255+ if (!rlast) {
48256+ rtmp->prev = NULL;
48257+ rolep->allowed_ips = rtmp;
48258+ } else {
48259+ rlast->next = rtmp;
48260+ rtmp->prev = rlast;
48261+ }
48262+
48263+ if (!ruserip)
48264+ rtmp->next = NULL;
48265+ }
48266+
48267+ return 0;
48268+}
48269+
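copy_user_allowedips() above (and copy_user_transitions() below) walk a list that userland linked through prev pointers, copy each node into ACL memory, and rebuild the kernel-side prev/next links in the order the nodes are visited. A simplified user-space sketch of that relinking, with malloc/memcpy standing in for acl_alloc/copy_from_user (hypothetical node type):

#include <stdlib.h>
#include <string.h>

struct ip_rule {
	unsigned int addr, netmask;
	struct ip_rule *prev, *next;
};

/* duplicate the chain that src links via prev, rebuilding prev/next on
   the copies the same way copy_user_allowedips() does */
static struct ip_rule *copy_rule_list(const struct ip_rule *src)
{
	struct ip_rule *head = NULL, *last = NULL, *copy;

	while (src != NULL) {
		copy = malloc(sizeof(*copy));
		if (copy == NULL)
			return NULL;            /* sketch: kernel code returns -ENOMEM */
		memcpy(copy, src, sizeof(*copy));
		src = copy->prev;               /* follow the original prev link */

		if (last == NULL) {
			copy->prev = NULL;
			head = copy;
		} else {
			last->next = copy;
			copy->prev = last;
		}
		if (src == NULL)
			copy->next = NULL;
		last = copy;
	}

	return head;
}
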
48270+static int
48271+copy_user_transitions(struct acl_role_label *rolep)
48272+{
48273+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48274+
48275+ unsigned int len;
48276+ char *tmp;
48277+
48278+ rusertp = rolep->transitions;
48279+
48280+ while (rusertp) {
48281+ rlast = rtmp;
48282+
48283+ if ((rtmp = (struct role_transition *)
48284+ acl_alloc(sizeof (struct role_transition))) == NULL)
48285+ return -ENOMEM;
48286+
48287+ if (copy_from_user(rtmp, rusertp,
48288+ sizeof (struct role_transition)))
48289+ return -EFAULT;
48290+
48291+ rusertp = rtmp->prev;
48292+
48293+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48294+
48295+ if (!len || len >= GR_SPROLE_LEN)
48296+ return -EINVAL;
48297+
48298+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48299+ return -ENOMEM;
48300+
48301+ if (copy_from_user(tmp, rtmp->rolename, len))
48302+ return -EFAULT;
48303+ tmp[len-1] = '\0';
48304+ rtmp->rolename = tmp;
48305+
48306+ if (!rlast) {
48307+ rtmp->prev = NULL;
48308+ rolep->transitions = rtmp;
48309+ } else {
48310+ rlast->next = rtmp;
48311+ rtmp->prev = rlast;
48312+ }
48313+
48314+ if (!rusertp)
48315+ rtmp->next = NULL;
48316+ }
48317+
48318+ return 0;
48319+}
48320+
48321+static struct acl_subject_label *
48322+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48323+{
48324+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48325+ unsigned int len;
48326+ char *tmp;
48327+ __u32 num_objs;
48328+ struct acl_ip_label **i_tmp, *i_utmp2;
48329+ struct gr_hash_struct ghash;
48330+ struct subject_map *subjmap;
48331+ unsigned int i_num;
48332+ int err;
48333+
48334+ s_tmp = lookup_subject_map(userp);
48335+
48336+ /* we've already copied this subject into the kernel, just return
48337+ the reference to it, and don't copy it over again
48338+ */
48339+ if (s_tmp)
48340+ return(s_tmp);
48341+
48342+ if ((s_tmp = (struct acl_subject_label *)
48343+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48344+ return ERR_PTR(-ENOMEM);
48345+
48346+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48347+ if (subjmap == NULL)
48348+ return ERR_PTR(-ENOMEM);
48349+
48350+ subjmap->user = userp;
48351+ subjmap->kernel = s_tmp;
48352+ insert_subj_map_entry(subjmap);
48353+
48354+ if (copy_from_user(s_tmp, userp,
48355+ sizeof (struct acl_subject_label)))
48356+ return ERR_PTR(-EFAULT);
48357+
48358+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48359+
48360+ if (!len || len >= PATH_MAX)
48361+ return ERR_PTR(-EINVAL);
48362+
48363+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48364+ return ERR_PTR(-ENOMEM);
48365+
48366+ if (copy_from_user(tmp, s_tmp->filename, len))
48367+ return ERR_PTR(-EFAULT);
48368+ tmp[len-1] = '\0';
48369+ s_tmp->filename = tmp;
48370+
48371+ if (!strcmp(s_tmp->filename, "/"))
48372+ role->root_label = s_tmp;
48373+
48374+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48375+ return ERR_PTR(-EFAULT);
48376+
48377+ /* copy user and group transition tables */
48378+
48379+ if (s_tmp->user_trans_num) {
48380+ uid_t *uidlist;
48381+
48382+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48383+ if (uidlist == NULL)
48384+ return ERR_PTR(-ENOMEM);
48385+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48386+ return ERR_PTR(-EFAULT);
48387+
48388+ s_tmp->user_transitions = uidlist;
48389+ }
48390+
48391+ if (s_tmp->group_trans_num) {
48392+ gid_t *gidlist;
48393+
48394+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48395+ if (gidlist == NULL)
48396+ return ERR_PTR(-ENOMEM);
48397+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48398+ return ERR_PTR(-EFAULT);
48399+
48400+ s_tmp->group_transitions = gidlist;
48401+ }
48402+
48403+ /* set up object hash table */
48404+ num_objs = count_user_objs(ghash.first);
48405+
48406+ s_tmp->obj_hash_size = num_objs;
48407+ s_tmp->obj_hash =
48408+ (struct acl_object_label **)
48409+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48410+
48411+ if (!s_tmp->obj_hash)
48412+ return ERR_PTR(-ENOMEM);
48413+
48414+ memset(s_tmp->obj_hash, 0,
48415+ s_tmp->obj_hash_size *
48416+ sizeof (struct acl_object_label *));
48417+
48418+ /* add in objects */
48419+ err = copy_user_objs(ghash.first, s_tmp, role);
48420+
48421+ if (err)
48422+ return ERR_PTR(err);
48423+
48424+ /* set pointer for parent subject */
48425+ if (s_tmp->parent_subject) {
48426+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48427+
48428+ if (IS_ERR(s_tmp2))
48429+ return s_tmp2;
48430+
48431+ s_tmp->parent_subject = s_tmp2;
48432+ }
48433+
48434+ /* add in ip acls */
48435+
48436+ if (!s_tmp->ip_num) {
48437+ s_tmp->ips = NULL;
48438+ goto insert;
48439+ }
48440+
48441+ i_tmp =
48442+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48443+ sizeof (struct acl_ip_label *));
48444+
48445+ if (!i_tmp)
48446+ return ERR_PTR(-ENOMEM);
48447+
48448+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48449+ *(i_tmp + i_num) =
48450+ (struct acl_ip_label *)
48451+ acl_alloc(sizeof (struct acl_ip_label));
48452+ if (!*(i_tmp + i_num))
48453+ return ERR_PTR(-ENOMEM);
48454+
48455+ if (copy_from_user
48456+ (&i_utmp2, s_tmp->ips + i_num,
48457+ sizeof (struct acl_ip_label *)))
48458+ return ERR_PTR(-EFAULT);
48459+
48460+ if (copy_from_user
48461+ (*(i_tmp + i_num), i_utmp2,
48462+ sizeof (struct acl_ip_label)))
48463+ return ERR_PTR(-EFAULT);
48464+
48465+ if ((*(i_tmp + i_num))->iface == NULL)
48466+ continue;
48467+
48468+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48469+ if (!len || len >= IFNAMSIZ)
48470+ return ERR_PTR(-EINVAL);
48471+ tmp = acl_alloc(len);
48472+ if (tmp == NULL)
48473+ return ERR_PTR(-ENOMEM);
48474+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48475+ return ERR_PTR(-EFAULT);
48476+ (*(i_tmp + i_num))->iface = tmp;
48477+ }
48478+
48479+ s_tmp->ips = i_tmp;
48480+
48481+insert:
48482+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48483+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48484+ return ERR_PTR(-ENOMEM);
48485+
48486+ return s_tmp;
48487+}
48488+
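do_copy_user_subj() above is in effect a memoized deep copy: before copying a subject it consults the subject_map hash (userland pointer -> kernel pointer), so a subject reachable from several places (parent_subject chains, nested objects, the role's subject hash) is copied exactly once and every reference is rewritten to that single kernel copy. A small user-space sketch of the memoization idea (hypothetical types; a linear list stands in for the hashed subject_map):

#include <stdlib.h>
#include <string.h>

struct subj {
	struct subj *parent;
	int payload;
};

struct map_entry {
	const struct subj *user;   /* pointer we were handed      */
	struct subj *copy;         /* copy already made, if any   */
	struct map_entry *next;
};

static struct map_entry *map_head;

static struct subj *lookup_copy(const struct subj *user)
{
	const struct map_entry *m;

	for (m = map_head; m != NULL; m = m->next)
		if (m->user == user)
			return m->copy;
	return NULL;
}

/* copy a subject and, recursively, its parent chain, reusing any copy
   made earlier - the lookup_subject_map()/insert_subj_map_entry() idea */
static struct subj *copy_subj(const struct subj *user)
{
	struct subj *copy = lookup_copy(user);
	struct map_entry *m;

	if (copy != NULL)
		return copy;

	copy = malloc(sizeof(*copy));
	m = malloc(sizeof(*m));
	if (copy == NULL || m == NULL)
		return NULL;                     /* sketch: no cleanup on failure */

	/* record the mapping before recursing so shared references
	   resolve to this same copy */
	m->user = user;
	m->copy = copy;
	m->next = map_head;
	map_head = m;

	memcpy(copy, user, sizeof(*copy));
	if (copy->parent != NULL)
		copy->parent = copy_subj(copy->parent);

	return copy;
}
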
48489+static int
48490+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48491+{
48492+ struct acl_subject_label s_pre;
48493+ struct acl_subject_label * ret;
48494+ int err;
48495+
48496+ while (userp) {
48497+ if (copy_from_user(&s_pre, userp,
48498+ sizeof (struct acl_subject_label)))
48499+ return -EFAULT;
48500+
48501+ /* do not add nested subjects here, add
48502+ while parsing objects
48503+ */
48504+
48505+ if (s_pre.mode & GR_NESTED) {
48506+ userp = s_pre.prev;
48507+ continue;
48508+ }
48509+
48510+ ret = do_copy_user_subj(userp, role);
48511+
48512+ err = PTR_ERR(ret);
48513+ if (IS_ERR(ret))
48514+ return err;
48515+
48516+ insert_acl_subj_label(ret, role);
48517+
48518+ userp = s_pre.prev;
48519+ }
48520+
48521+ return 0;
48522+}
48523+
48524+static int
48525+copy_user_acl(struct gr_arg *arg)
48526+{
48527+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48528+ struct sprole_pw *sptmp;
48529+ struct gr_hash_struct *ghash;
48530+ uid_t *domainlist;
48531+ unsigned int r_num;
48532+ unsigned int len;
48533+ char *tmp;
48534+ int err = 0;
48535+ __u16 i;
48536+ __u32 num_subjs;
48537+
48538+ /* we need a default and kernel role */
48539+ if (arg->role_db.num_roles < 2)
48540+ return -EINVAL;
48541+
48542+ /* copy special role authentication info from userspace */
48543+
48544+ num_sprole_pws = arg->num_sprole_pws;
48545+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48546+
48547+ if (!acl_special_roles) {
48548+ err = -ENOMEM;
48549+ goto cleanup;
48550+ }
48551+
48552+ for (i = 0; i < num_sprole_pws; i++) {
48553+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48554+ if (!sptmp) {
48555+ err = -ENOMEM;
48556+ goto cleanup;
48557+ }
48558+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48559+ sizeof (struct sprole_pw))) {
48560+ err = -EFAULT;
48561+ goto cleanup;
48562+ }
48563+
48564+ len =
48565+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48566+
48567+ if (!len || len >= GR_SPROLE_LEN) {
48568+ err = -EINVAL;
48569+ goto cleanup;
48570+ }
48571+
48572+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48573+ err = -ENOMEM;
48574+ goto cleanup;
48575+ }
48576+
48577+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48578+ err = -EFAULT;
48579+ goto cleanup;
48580+ }
48581+ tmp[len-1] = '\0';
48582+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48583+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48584+#endif
48585+ sptmp->rolename = tmp;
48586+ acl_special_roles[i] = sptmp;
48587+ }
48588+
48589+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48590+
48591+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48592+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48593+
48594+ if (!r_tmp) {
48595+ err = -ENOMEM;
48596+ goto cleanup;
48597+ }
48598+
48599+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48600+ sizeof (struct acl_role_label *))) {
48601+ err = -EFAULT;
48602+ goto cleanup;
48603+ }
48604+
48605+ if (copy_from_user(r_tmp, r_utmp2,
48606+ sizeof (struct acl_role_label))) {
48607+ err = -EFAULT;
48608+ goto cleanup;
48609+ }
48610+
48611+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48612+
48613+		if (!len || len >= GR_SPROLE_LEN) {
48614+ err = -EINVAL;
48615+ goto cleanup;
48616+ }
48617+
48618+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48619+ err = -ENOMEM;
48620+ goto cleanup;
48621+ }
48622+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48623+ err = -EFAULT;
48624+ goto cleanup;
48625+ }
48626+ tmp[len-1] = '\0';
48627+ r_tmp->rolename = tmp;
48628+
48629+ if (!strcmp(r_tmp->rolename, "default")
48630+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48631+ default_role = r_tmp;
48632+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48633+ kernel_role = r_tmp;
48634+ }
48635+
48636+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48637+ err = -ENOMEM;
48638+ goto cleanup;
48639+ }
48640+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48641+ err = -EFAULT;
48642+ goto cleanup;
48643+ }
48644+
48645+ r_tmp->hash = ghash;
48646+
48647+ num_subjs = count_user_subjs(r_tmp->hash->first);
48648+
48649+ r_tmp->subj_hash_size = num_subjs;
48650+ r_tmp->subj_hash =
48651+ (struct acl_subject_label **)
48652+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48653+
48654+ if (!r_tmp->subj_hash) {
48655+ err = -ENOMEM;
48656+ goto cleanup;
48657+ }
48658+
48659+ err = copy_user_allowedips(r_tmp);
48660+ if (err)
48661+ goto cleanup;
48662+
48663+ /* copy domain info */
48664+ if (r_tmp->domain_children != NULL) {
48665+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48666+ if (domainlist == NULL) {
48667+ err = -ENOMEM;
48668+ goto cleanup;
48669+ }
48670+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48671+ err = -EFAULT;
48672+ goto cleanup;
48673+ }
48674+ r_tmp->domain_children = domainlist;
48675+ }
48676+
48677+ err = copy_user_transitions(r_tmp);
48678+ if (err)
48679+ goto cleanup;
48680+
48681+ memset(r_tmp->subj_hash, 0,
48682+ r_tmp->subj_hash_size *
48683+ sizeof (struct acl_subject_label *));
48684+
48685+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48686+
48687+ if (err)
48688+ goto cleanup;
48689+
48690+ /* set nested subject list to null */
48691+ r_tmp->hash->first = NULL;
48692+
48693+ insert_acl_role_label(r_tmp);
48694+ }
48695+
48696+ goto return_err;
48697+ cleanup:
48698+ free_variables();
48699+ return_err:
48700+ return err;
48701+
48702+}
48703+
48704+static int
48705+gracl_init(struct gr_arg *args)
48706+{
48707+ int error = 0;
48708+
48709+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
48710+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
48711+
48712+ if (init_variables(args)) {
48713+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
48714+ error = -ENOMEM;
48715+ free_variables();
48716+ goto out;
48717+ }
48718+
48719+ error = copy_user_acl(args);
48720+ free_init_variables();
48721+ if (error) {
48722+ free_variables();
48723+ goto out;
48724+ }
48725+
48726+ if ((error = gr_set_acls(0))) {
48727+ free_variables();
48728+ goto out;
48729+ }
48730+
48731+ pax_open_kernel();
48732+ gr_status |= GR_READY;
48733+ pax_close_kernel();
48734+
48735+ out:
48736+ return error;
48737+}
48738+
48739+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
48740+
48741+static int
48742+glob_match(const char *p, const char *n)
48743+{
48744+ char c;
48745+
48746+ while ((c = *p++) != '\0') {
48747+ switch (c) {
48748+ case '?':
48749+ if (*n == '\0')
48750+ return 1;
48751+ else if (*n == '/')
48752+ return 1;
48753+ break;
48754+ case '\\':
48755+ if (*n != c)
48756+ return 1;
48757+ break;
48758+ case '*':
48759+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
48760+ if (*n == '/')
48761+ return 1;
48762+ else if (c == '?') {
48763+ if (*n == '\0')
48764+ return 1;
48765+ else
48766+ ++n;
48767+ }
48768+ }
48769+ if (c == '\0') {
48770+ return 0;
48771+ } else {
48772+ const char *endp;
48773+
48774+ if ((endp = strchr(n, '/')) == NULL)
48775+ endp = n + strlen(n);
48776+
48777+ if (c == '[') {
48778+ for (--p; n < endp; ++n)
48779+ if (!glob_match(p, n))
48780+ return 0;
48781+ } else if (c == '/') {
48782+ while (*n != '\0' && *n != '/')
48783+ ++n;
48784+ if (*n == '/' && !glob_match(p, n + 1))
48785+ return 0;
48786+ } else {
48787+ for (--p; n < endp; ++n)
48788+ if (*n == c && !glob_match(p, n))
48789+ return 0;
48790+ }
48791+
48792+ return 1;
48793+ }
48794+ case '[':
48795+ {
48796+ int not;
48797+ char cold;
48798+
48799+ if (*n == '\0' || *n == '/')
48800+ return 1;
48801+
48802+ not = (*p == '!' || *p == '^');
48803+ if (not)
48804+ ++p;
48805+
48806+ c = *p++;
48807+ for (;;) {
48808+ unsigned char fn = (unsigned char)*n;
48809+
48810+ if (c == '\0')
48811+ return 1;
48812+ else {
48813+ if (c == fn)
48814+ goto matched;
48815+ cold = c;
48816+ c = *p++;
48817+
48818+ if (c == '-' && *p != ']') {
48819+ unsigned char cend = *p++;
48820+
48821+ if (cend == '\0')
48822+ return 1;
48823+
48824+ if (cold <= fn && fn <= cend)
48825+ goto matched;
48826+
48827+ c = *p++;
48828+ }
48829+ }
48830+
48831+ if (c == ']')
48832+ break;
48833+ }
48834+ if (!not)
48835+ return 1;
48836+ break;
48837+ matched:
48838+ while (c != ']') {
48839+ if (c == '\0')
48840+ return 1;
48841+
48842+ c = *p++;
48843+ }
48844+ if (not)
48845+ return 1;
48846+ }
48847+ break;
48848+ default:
48849+ if (c != *n)
48850+ return 1;
48851+ }
48852+
48853+ ++n;
48854+ }
48855+
48856+ if (*n == '\0')
48857+ return 0;
48858+
48859+ if (*n == '/')
48860+ return 0;
48861+
48862+ return 1;
48863+}
48864+
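glob_match() keeps fnmatch()'s convention of returning 0 on a match, but with an RBAC twist: '?', bracket classes, and a '*' that is followed by more pattern never match a '/', so a wildcard normally stays within one path component. A tiny test harness, assuming it is appended to the same file as the function above so the static symbol is visible (the expected values in the comments are hand-traced from the code, not output taken from the patch):

#include <stdio.h>

/* sketch only: relies on glob_match() above being in the same file */
int main(void)
{
	printf("%d\n", glob_match("/home/*/.ssh", "/home/alice/.ssh"));     /* 0: match                 */
	printf("%d\n", glob_match("/home/*/.ssh", "/home/alice/sub/.ssh")); /* 1: '*' stops at '/' here */
	printf("%d\n", glob_match("*.so", "libcrypt.so"));                  /* 0: match                 */
	printf("%d\n", glob_match("?ar", "/ar"));                           /* 1: '?' never matches '/' */
	return 0;
}
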
48865+static struct acl_object_label *
48866+chk_glob_label(struct acl_object_label *globbed,
48867+ struct dentry *dentry, struct vfsmount *mnt, char **path)
48868+{
48869+ struct acl_object_label *tmp;
48870+
48871+ if (*path == NULL)
48872+ *path = gr_to_filename_nolock(dentry, mnt);
48873+
48874+ tmp = globbed;
48875+
48876+ while (tmp) {
48877+ if (!glob_match(tmp->filename, *path))
48878+ return tmp;
48879+ tmp = tmp->next;
48880+ }
48881+
48882+ return NULL;
48883+}
48884+
48885+static struct acl_object_label *
48886+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48887+ const ino_t curr_ino, const dev_t curr_dev,
48888+ const struct acl_subject_label *subj, char **path, const int checkglob)
48889+{
48890+ struct acl_subject_label *tmpsubj;
48891+ struct acl_object_label *retval;
48892+ struct acl_object_label *retval2;
48893+
48894+ tmpsubj = (struct acl_subject_label *) subj;
48895+ read_lock(&gr_inode_lock);
48896+ do {
48897+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
48898+ if (retval) {
48899+ if (checkglob && retval->globbed) {
48900+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
48901+ (struct vfsmount *)orig_mnt, path);
48902+ if (retval2)
48903+ retval = retval2;
48904+ }
48905+ break;
48906+ }
48907+ } while ((tmpsubj = tmpsubj->parent_subject));
48908+ read_unlock(&gr_inode_lock);
48909+
48910+ return retval;
48911+}
48912+
48913+static __inline__ struct acl_object_label *
48914+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
48915+ const struct dentry *curr_dentry,
48916+ const struct acl_subject_label *subj, char **path, const int checkglob)
48917+{
48918+ int newglob = checkglob;
48919+
48920+	/* if we aren't checking a subdirectory of the original path yet, don't do glob
48921+	   checking, as we don't want a / * rule to match instead of the / object.
48922+	   Don't do this for create lookups that call this function, though, since they
48923+	   look up on the parent and thus need globbing checks on all paths.
48924+	*/
48925+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
48926+ newglob = GR_NO_GLOB;
48927+
48928+ return __full_lookup(orig_dentry, orig_mnt,
48929+ curr_dentry->d_inode->i_ino,
48930+ __get_dev(curr_dentry), subj, path, newglob);
48931+}
48932+
48933+static struct acl_object_label *
48934+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48935+ const struct acl_subject_label *subj, char *path, const int checkglob)
48936+{
48937+ struct dentry *dentry = (struct dentry *) l_dentry;
48938+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
48939+ struct acl_object_label *retval;
48940+
48941+ spin_lock(&dcache_lock);
48942+ spin_lock(&vfsmount_lock);
48943+
48944+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
48945+#ifdef CONFIG_NET
48946+ mnt == sock_mnt ||
48947+#endif
48948+#ifdef CONFIG_HUGETLBFS
48949+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
48950+#endif
48951+ /* ignore Eric Biederman */
48952+ IS_PRIVATE(l_dentry->d_inode))) {
48953+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
48954+ goto out;
48955+ }
48956+
48957+ for (;;) {
48958+ if (dentry == real_root && mnt == real_root_mnt)
48959+ break;
48960+
48961+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
48962+ if (mnt->mnt_parent == mnt)
48963+ break;
48964+
48965+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48966+ if (retval != NULL)
48967+ goto out;
48968+
48969+ dentry = mnt->mnt_mountpoint;
48970+ mnt = mnt->mnt_parent;
48971+ continue;
48972+ }
48973+
48974+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48975+ if (retval != NULL)
48976+ goto out;
48977+
48978+ dentry = dentry->d_parent;
48979+ }
48980+
48981+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
48982+
48983+ if (retval == NULL)
48984+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
48985+out:
48986+ spin_unlock(&vfsmount_lock);
48987+ spin_unlock(&dcache_lock);
48988+
48989+ BUG_ON(retval == NULL);
48990+
48991+ return retval;
48992+}
48993+
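__chk_obj_label() above resolves an object by walking from the target dentry up through its parents (hopping across mount points via mnt_mountpoint) toward the RBAC root, so the most specific labeled ancestor supplies the mode; special pseudo filesystems are short-circuited to the fakefs objects before the walk. A coarse user-space analogue over plain path strings, illustrating only the most-specific-ancestor-wins idea (the lookup table and mode values are hypothetical):

#include <stdio.h>
#include <string.h>

/* hypothetical policy: returns a mode for an exactly labeled path,
   or -1 when that path has no label of its own */
static int lookup_exact(const char *path)
{
	if (strcmp(path, "/home/alice/.ssh") == 0)
		return 0;                  /* e.g. a hidden object   */
	if (strcmp(path, "/") == 0)
		return 1;                  /* e.g. the default label */
	return -1;
}

/* walk toward "/" and return the first (most specific) label found,
   in the spirit of the dentry/mountpoint walk in __chk_obj_label() */
static int lookup_label(const char *path)
{
	char buf[256];
	char *slash;
	int mode;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (;;) {
		mode = lookup_exact(buf);
		if (mode != -1 || strcmp(buf, "/") == 0)
			return mode;
		slash = strrchr(buf, '/');
		if (slash == buf)
			buf[1] = '\0';     /* parent is "/" itself   */
		else
			*slash = '\0';     /* strip last component   */
	}
}

int main(void)
{
	printf("%d\n", lookup_label("/home/alice/.ssh/id_rsa")); /* 0, from /home/alice/.ssh */
	printf("%d\n", lookup_label("/etc/passwd"));              /* 1, from /                */
	return 0;
}
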
48994+static __inline__ struct acl_object_label *
48995+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
48996+ const struct acl_subject_label *subj)
48997+{
48998+ char *path = NULL;
48999+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49000+}
49001+
49002+static __inline__ struct acl_object_label *
49003+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49004+ const struct acl_subject_label *subj)
49005+{
49006+ char *path = NULL;
49007+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49008+}
49009+
49010+static __inline__ struct acl_object_label *
49011+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49012+ const struct acl_subject_label *subj, char *path)
49013+{
49014+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49015+}
49016+
49017+static struct acl_subject_label *
49018+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49019+ const struct acl_role_label *role)
49020+{
49021+ struct dentry *dentry = (struct dentry *) l_dentry;
49022+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49023+ struct acl_subject_label *retval;
49024+
49025+ spin_lock(&dcache_lock);
49026+ spin_lock(&vfsmount_lock);
49027+
49028+ for (;;) {
49029+ if (dentry == real_root && mnt == real_root_mnt)
49030+ break;
49031+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49032+ if (mnt->mnt_parent == mnt)
49033+ break;
49034+
49035+ read_lock(&gr_inode_lock);
49036+ retval =
49037+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49038+ __get_dev(dentry), role);
49039+ read_unlock(&gr_inode_lock);
49040+ if (retval != NULL)
49041+ goto out;
49042+
49043+ dentry = mnt->mnt_mountpoint;
49044+ mnt = mnt->mnt_parent;
49045+ continue;
49046+ }
49047+
49048+ read_lock(&gr_inode_lock);
49049+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49050+ __get_dev(dentry), role);
49051+ read_unlock(&gr_inode_lock);
49052+ if (retval != NULL)
49053+ goto out;
49054+
49055+ dentry = dentry->d_parent;
49056+ }
49057+
49058+ read_lock(&gr_inode_lock);
49059+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49060+ __get_dev(dentry), role);
49061+ read_unlock(&gr_inode_lock);
49062+
49063+ if (unlikely(retval == NULL)) {
49064+ read_lock(&gr_inode_lock);
49065+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
49066+ __get_dev(real_root), role);
49067+ read_unlock(&gr_inode_lock);
49068+ }
49069+out:
49070+ spin_unlock(&vfsmount_lock);
49071+ spin_unlock(&dcache_lock);
49072+
49073+ BUG_ON(retval == NULL);
49074+
49075+ return retval;
49076+}
49077+
49078+static void
49079+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49080+{
49081+ struct task_struct *task = current;
49082+ const struct cred *cred = current_cred();
49083+
49084+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49085+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49086+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49087+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49088+
49089+ return;
49090+}
49091+
49092+static void
49093+gr_log_learn_sysctl(const char *path, const __u32 mode)
49094+{
49095+ struct task_struct *task = current;
49096+ const struct cred *cred = current_cred();
49097+
49098+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49099+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49100+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49101+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49102+
49103+ return;
49104+}
49105+
49106+static void
49107+gr_log_learn_id_change(const char type, const unsigned int real,
49108+ const unsigned int effective, const unsigned int fs)
49109+{
49110+ struct task_struct *task = current;
49111+ const struct cred *cred = current_cred();
49112+
49113+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49114+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49115+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49116+ type, real, effective, fs, &task->signal->saved_ip);
49117+
49118+ return;
49119+}
49120+
49121+__u32
49122+gr_check_link(const struct dentry * new_dentry,
49123+ const struct dentry * parent_dentry,
49124+ const struct vfsmount * parent_mnt,
49125+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49126+{
49127+ struct acl_object_label *obj;
49128+ __u32 oldmode, newmode;
49129+ __u32 needmode;
49130+
49131+ if (unlikely(!(gr_status & GR_READY)))
49132+ return (GR_CREATE | GR_LINK);
49133+
49134+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49135+ oldmode = obj->mode;
49136+
49137+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49138+ oldmode |= (GR_CREATE | GR_LINK);
49139+
49140+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
49141+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49142+ needmode |= GR_SETID | GR_AUDIT_SETID;
49143+
49144+ newmode =
49145+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
49146+ oldmode | needmode);
49147+
49148+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
49149+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
49150+ GR_INHERIT | GR_AUDIT_INHERIT);
49151+
49152+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
49153+ goto bad;
49154+
49155+ if ((oldmode & needmode) != needmode)
49156+ goto bad;
49157+
49158+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49159+ if ((newmode & needmode) != needmode)
49160+ goto bad;
49161+
49162+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49163+ return newmode;
49164+bad:
49165+ needmode = oldmode;
49166+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49167+ needmode |= GR_SETID;
49168+
49169+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49170+ gr_log_learn(old_dentry, old_mnt, needmode);
49171+ return (GR_CREATE | GR_LINK);
49172+ } else if (newmode & GR_SUPPRESS)
49173+ return GR_SUPPRESS;
49174+ else
49175+ return 0;
49176+}
49177+
49178+__u32
49179+gr_search_file(const struct dentry * dentry, const __u32 mode,
49180+ const struct vfsmount * mnt)
49181+{
49182+ __u32 retval = mode;
49183+ struct acl_subject_label *curracl;
49184+ struct acl_object_label *currobj;
49185+
49186+ if (unlikely(!(gr_status & GR_READY)))
49187+ return (mode & ~GR_AUDITS);
49188+
49189+ curracl = current->acl;
49190+
49191+ currobj = chk_obj_label(dentry, mnt, curracl);
49192+ retval = currobj->mode & mode;
49193+
49194+ /* if we're opening a specified transfer file for writing
49195+ (e.g. /dev/initctl), then transfer our role to init
49196+ */
49197+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49198+ current->role->roletype & GR_ROLE_PERSIST)) {
49199+ struct task_struct *task = init_pid_ns.child_reaper;
49200+
49201+ if (task->role != current->role) {
49202+ task->acl_sp_role = 0;
49203+ task->acl_role_id = current->acl_role_id;
49204+ task->role = current->role;
49205+ rcu_read_lock();
49206+ read_lock(&grsec_exec_file_lock);
49207+ gr_apply_subject_to_task(task);
49208+ read_unlock(&grsec_exec_file_lock);
49209+ rcu_read_unlock();
49210+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49211+ }
49212+ }
49213+
49214+ if (unlikely
49215+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49216+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49217+ __u32 new_mode = mode;
49218+
49219+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49220+
49221+ retval = new_mode;
49222+
49223+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49224+ new_mode |= GR_INHERIT;
49225+
49226+ if (!(mode & GR_NOLEARN))
49227+ gr_log_learn(dentry, mnt, new_mode);
49228+ }
49229+
49230+ return retval;
49231+}
49232+
49233+__u32
49234+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49235+ const struct vfsmount * mnt, const __u32 mode)
49236+{
49237+ struct name_entry *match;
49238+ struct acl_object_label *matchpo;
49239+ struct acl_subject_label *curracl;
49240+ char *path;
49241+ __u32 retval;
49242+
49243+ if (unlikely(!(gr_status & GR_READY)))
49244+ return (mode & ~GR_AUDITS);
49245+
49246+ preempt_disable();
49247+ path = gr_to_filename_rbac(new_dentry, mnt);
49248+ match = lookup_name_entry_create(path);
49249+
49250+ if (!match)
49251+ goto check_parent;
49252+
49253+ curracl = current->acl;
49254+
49255+ read_lock(&gr_inode_lock);
49256+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49257+ read_unlock(&gr_inode_lock);
49258+
49259+ if (matchpo) {
49260+ if ((matchpo->mode & mode) !=
49261+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
49262+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49263+ __u32 new_mode = mode;
49264+
49265+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49266+
49267+ gr_log_learn(new_dentry, mnt, new_mode);
49268+
49269+ preempt_enable();
49270+ return new_mode;
49271+ }
49272+ preempt_enable();
49273+ return (matchpo->mode & mode);
49274+ }
49275+
49276+ check_parent:
49277+ curracl = current->acl;
49278+
49279+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49280+ retval = matchpo->mode & mode;
49281+
49282+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49283+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49284+ __u32 new_mode = mode;
49285+
49286+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49287+
49288+ gr_log_learn(new_dentry, mnt, new_mode);
49289+ preempt_enable();
49290+ return new_mode;
49291+ }
49292+
49293+ preempt_enable();
49294+ return retval;
49295+}
49296+
49297+int
49298+gr_check_hidden_task(const struct task_struct *task)
49299+{
49300+ if (unlikely(!(gr_status & GR_READY)))
49301+ return 0;
49302+
49303+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49304+ return 1;
49305+
49306+ return 0;
49307+}
49308+
49309+int
49310+gr_check_protected_task(const struct task_struct *task)
49311+{
49312+ if (unlikely(!(gr_status & GR_READY) || !task))
49313+ return 0;
49314+
49315+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49316+ task->acl != current->acl)
49317+ return 1;
49318+
49319+ return 0;
49320+}
49321+
49322+int
49323+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49324+{
49325+ struct task_struct *p;
49326+ int ret = 0;
49327+
49328+ if (unlikely(!(gr_status & GR_READY) || !pid))
49329+ return ret;
49330+
49331+ read_lock(&tasklist_lock);
49332+ do_each_pid_task(pid, type, p) {
49333+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49334+ p->acl != current->acl) {
49335+ ret = 1;
49336+ goto out;
49337+ }
49338+ } while_each_pid_task(pid, type, p);
49339+out:
49340+ read_unlock(&tasklist_lock);
49341+
49342+ return ret;
49343+}
49344+
49345+void
49346+gr_copy_label(struct task_struct *tsk)
49347+{
49348+ tsk->signal->used_accept = 0;
49349+ tsk->acl_sp_role = 0;
49350+ tsk->acl_role_id = current->acl_role_id;
49351+ tsk->acl = current->acl;
49352+ tsk->role = current->role;
49353+ tsk->signal->curr_ip = current->signal->curr_ip;
49354+ tsk->signal->saved_ip = current->signal->saved_ip;
49355+ if (current->exec_file)
49356+ get_file(current->exec_file);
49357+ tsk->exec_file = current->exec_file;
49358+ tsk->is_writable = current->is_writable;
49359+ if (unlikely(current->signal->used_accept)) {
49360+ current->signal->curr_ip = 0;
49361+ current->signal->saved_ip = 0;
49362+ }
49363+
49364+ return;
49365+}
49366+
49367+static void
49368+gr_set_proc_res(struct task_struct *task)
49369+{
49370+ struct acl_subject_label *proc;
49371+ unsigned short i;
49372+
49373+ proc = task->acl;
49374+
49375+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49376+ return;
49377+
49378+ for (i = 0; i < RLIM_NLIMITS; i++) {
49379+ if (!(proc->resmask & (1 << i)))
49380+ continue;
49381+
49382+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49383+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49384+ }
49385+
49386+ return;
49387+}
49388+
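gr_set_proc_res() above applies per-subject resource limits selectively: resmask carries one bit per RLIMIT index, and only the limits whose bit is set are copied over the task's rlimits (learning subjects are skipped entirely). A minimal sketch of that bitmask filter (hypothetical types):

struct limit_pair { unsigned long cur, max; };

/* copy only the limits whose bit is set in resmask, as gr_set_proc_res()
   does for proc->resmask over RLIM_NLIMITS slots */
static void apply_masked_limits(struct limit_pair *dst,
				const struct limit_pair *src,
				unsigned int resmask, unsigned int nlimits)
{
	unsigned int i;

	for (i = 0; i < nlimits; i++)
		if (resmask & (1U << i))
			dst[i] = src[i];
}
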
49389+extern int __gr_process_user_ban(struct user_struct *user);
49390+
49391+int
49392+gr_check_user_change(int real, int effective, int fs)
49393+{
49394+ unsigned int i;
49395+ __u16 num;
49396+ uid_t *uidlist;
49397+ int curuid;
49398+ int realok = 0;
49399+ int effectiveok = 0;
49400+ int fsok = 0;
49401+
49402+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49403+ struct user_struct *user;
49404+
49405+ if (real == -1)
49406+ goto skipit;
49407+
49408+ user = find_user(real);
49409+ if (user == NULL)
49410+ goto skipit;
49411+
49412+ if (__gr_process_user_ban(user)) {
49413+ /* for find_user */
49414+ free_uid(user);
49415+ return 1;
49416+ }
49417+
49418+ /* for find_user */
49419+ free_uid(user);
49420+
49421+skipit:
49422+#endif
49423+
49424+ if (unlikely(!(gr_status & GR_READY)))
49425+ return 0;
49426+
49427+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49428+ gr_log_learn_id_change('u', real, effective, fs);
49429+
49430+ num = current->acl->user_trans_num;
49431+ uidlist = current->acl->user_transitions;
49432+
49433+ if (uidlist == NULL)
49434+ return 0;
49435+
49436+ if (real == -1)
49437+ realok = 1;
49438+ if (effective == -1)
49439+ effectiveok = 1;
49440+ if (fs == -1)
49441+ fsok = 1;
49442+
49443+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49444+ for (i = 0; i < num; i++) {
49445+ curuid = (int)uidlist[i];
49446+ if (real == curuid)
49447+ realok = 1;
49448+ if (effective == curuid)
49449+ effectiveok = 1;
49450+ if (fs == curuid)
49451+ fsok = 1;
49452+ }
49453+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49454+ for (i = 0; i < num; i++) {
49455+ curuid = (int)uidlist[i];
49456+ if (real == curuid)
49457+ break;
49458+ if (effective == curuid)
49459+ break;
49460+ if (fs == curuid)
49461+ break;
49462+ }
49463+ /* not in deny list */
49464+ if (i == num) {
49465+ realok = 1;
49466+ effectiveok = 1;
49467+ fsok = 1;
49468+ }
49469+ }
49470+
49471+ if (realok && effectiveok && fsok)
49472+ return 0;
49473+ else {
49474+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49475+ return 1;
49476+ }
49477+}
49478+
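gr_check_user_change() above treats a uid transition list as either an allow list or a deny list: with GR_ID_ALLOW each of the real, effective, and fs ids must appear in the list (ids passed as -1 are not being changed and are always acceptable), while with GR_ID_DENY the change is refused as soon as any of them appears. A compact user-space sketch of that decision (hypothetical enum; gr_check_group_change() below is the same logic over gids):

#include <stddef.h>

enum trans_type { ID_ALLOW, ID_DENY };

/* 0: change permitted, 1: change refused - the allow/deny logic of
   gr_check_user_change(), with -1 meaning "this id is not being changed" */
static int check_id_change(enum trans_type type, const int *list,
			   unsigned int num, int real, int effective, int fs)
{
	int realok = (real == -1);
	int effok = (effective == -1);
	int fsok = (fs == -1);
	unsigned int i;

	if (list == NULL)
		return 0;          /* subject defines no transition rules */

	if (type == ID_ALLOW) {
		for (i = 0; i < num; i++) {
			if (real == list[i])
				realok = 1;
			if (effective == list[i])
				effok = 1;
			if (fs == list[i])
				fsok = 1;
		}
	} else {
		for (i = 0; i < num; i++)
			if (real == list[i] || effective == list[i] ||
			    fs == list[i])
				break;
		if (i == num)      /* none of the ids is in the deny list */
			realok = effok = fsok = 1;
	}

	return (realok && effok && fsok) ? 0 : 1;
}
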
49479+int
49480+gr_check_group_change(int real, int effective, int fs)
49481+{
49482+ unsigned int i;
49483+ __u16 num;
49484+ gid_t *gidlist;
49485+ int curgid;
49486+ int realok = 0;
49487+ int effectiveok = 0;
49488+ int fsok = 0;
49489+
49490+ if (unlikely(!(gr_status & GR_READY)))
49491+ return 0;
49492+
49493+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49494+ gr_log_learn_id_change('g', real, effective, fs);
49495+
49496+ num = current->acl->group_trans_num;
49497+ gidlist = current->acl->group_transitions;
49498+
49499+ if (gidlist == NULL)
49500+ return 0;
49501+
49502+ if (real == -1)
49503+ realok = 1;
49504+ if (effective == -1)
49505+ effectiveok = 1;
49506+ if (fs == -1)
49507+ fsok = 1;
49508+
49509+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49510+ for (i = 0; i < num; i++) {
49511+ curgid = (int)gidlist[i];
49512+ if (real == curgid)
49513+ realok = 1;
49514+ if (effective == curgid)
49515+ effectiveok = 1;
49516+ if (fs == curgid)
49517+ fsok = 1;
49518+ }
49519+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49520+ for (i = 0; i < num; i++) {
49521+ curgid = (int)gidlist[i];
49522+ if (real == curgid)
49523+ break;
49524+ if (effective == curgid)
49525+ break;
49526+ if (fs == curgid)
49527+ break;
49528+ }
49529+ /* not in deny list */
49530+ if (i == num) {
49531+ realok = 1;
49532+ effectiveok = 1;
49533+ fsok = 1;
49534+ }
49535+ }
49536+
49537+ if (realok && effectiveok && fsok)
49538+ return 0;
49539+ else {
49540+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49541+ return 1;
49542+ }
49543+}
49544+
49545+void
49546+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49547+{
49548+ struct acl_role_label *role = task->role;
49549+ struct acl_subject_label *subj = NULL;
49550+ struct acl_object_label *obj;
49551+ struct file *filp;
49552+
49553+ if (unlikely(!(gr_status & GR_READY)))
49554+ return;
49555+
49556+ filp = task->exec_file;
49557+
49558+ /* kernel process, we'll give them the kernel role */
49559+ if (unlikely(!filp)) {
49560+ task->role = kernel_role;
49561+ task->acl = kernel_role->root_label;
49562+ return;
49563+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49564+ role = lookup_acl_role_label(task, uid, gid);
49565+
49566+ /* perform subject lookup in possibly new role
49567+ we can use this result below in the case where role == task->role
49568+ */
49569+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49570+
49571+	/* if we changed uid/gid but ended up in the same role and are
49572+	   using inheritance, don't lose the inherited subject: if the
49573+	   current subject is other than what a normal lookup would
49574+	   result in, we arrived at it via inheritance, so don't
49575+	   lose that subject
49576+	*/
49577+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49578+ (subj == task->acl)))
49579+ task->acl = subj;
49580+
49581+ task->role = role;
49582+
49583+ task->is_writable = 0;
49584+
49585+ /* ignore additional mmap checks for processes that are writable
49586+ by the default ACL */
49587+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49588+ if (unlikely(obj->mode & GR_WRITE))
49589+ task->is_writable = 1;
49590+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49591+ if (unlikely(obj->mode & GR_WRITE))
49592+ task->is_writable = 1;
49593+
49594+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49595+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49596+#endif
49597+
49598+ gr_set_proc_res(task);
49599+
49600+ return;
49601+}
49602+
49603+int
49604+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49605+ const int unsafe_share)
49606+{
49607+ struct task_struct *task = current;
49608+ struct acl_subject_label *newacl;
49609+ struct acl_object_label *obj;
49610+ __u32 retmode;
49611+
49612+ if (unlikely(!(gr_status & GR_READY)))
49613+ return 0;
49614+
49615+ newacl = chk_subj_label(dentry, mnt, task->role);
49616+
49617+ task_lock(task);
49618+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49619+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49620+ !(task->role->roletype & GR_ROLE_GOD) &&
49621+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49622+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49623+ task_unlock(task);
49624+ if (unsafe_share)
49625+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49626+ else
49627+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49628+ return -EACCES;
49629+ }
49630+ task_unlock(task);
49631+
49632+ obj = chk_obj_label(dentry, mnt, task->acl);
49633+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49634+
49635+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49636+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49637+ if (obj->nested)
49638+ task->acl = obj->nested;
49639+ else
49640+ task->acl = newacl;
49641+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49642+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49643+
49644+ task->is_writable = 0;
49645+
49646+ /* ignore additional mmap checks for processes that are writable
49647+ by the default ACL */
49648+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49649+ if (unlikely(obj->mode & GR_WRITE))
49650+ task->is_writable = 1;
49651+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49652+ if (unlikely(obj->mode & GR_WRITE))
49653+ task->is_writable = 1;
49654+
49655+ gr_set_proc_res(task);
49656+
49657+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49658+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49659+#endif
49660+ return 0;
49661+}
49662+
49663+/* always called with valid inodev ptr */
49664+static void
49665+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49666+{
49667+ struct acl_object_label *matchpo;
49668+ struct acl_subject_label *matchps;
49669+ struct acl_subject_label *subj;
49670+ struct acl_role_label *role;
49671+ unsigned int x;
49672+
49673+ FOR_EACH_ROLE_START(role)
49674+ FOR_EACH_SUBJECT_START(role, subj, x)
49675+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49676+ matchpo->mode |= GR_DELETED;
49677+ FOR_EACH_SUBJECT_END(subj,x)
49678+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49679+ if (subj->inode == ino && subj->device == dev)
49680+ subj->mode |= GR_DELETED;
49681+ FOR_EACH_NESTED_SUBJECT_END(subj)
49682+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49683+ matchps->mode |= GR_DELETED;
49684+ FOR_EACH_ROLE_END(role)
49685+
49686+ inodev->nentry->deleted = 1;
49687+
49688+ return;
49689+}
49690+
49691+void
49692+gr_handle_delete(const ino_t ino, const dev_t dev)
49693+{
49694+ struct inodev_entry *inodev;
49695+
49696+ if (unlikely(!(gr_status & GR_READY)))
49697+ return;
49698+
49699+ write_lock(&gr_inode_lock);
49700+ inodev = lookup_inodev_entry(ino, dev);
49701+ if (inodev != NULL)
49702+ do_handle_delete(inodev, ino, dev);
49703+ write_unlock(&gr_inode_lock);
49704+
49705+ return;
49706+}
49707+
49708+static void
49709+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
49710+ const ino_t newinode, const dev_t newdevice,
49711+ struct acl_subject_label *subj)
49712+{
49713+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
49714+ struct acl_object_label *match;
49715+
49716+ match = subj->obj_hash[index];
49717+
49718+ while (match && (match->inode != oldinode ||
49719+ match->device != olddevice ||
49720+ !(match->mode & GR_DELETED)))
49721+ match = match->next;
49722+
49723+ if (match && (match->inode == oldinode)
49724+ && (match->device == olddevice)
49725+ && (match->mode & GR_DELETED)) {
49726+ if (match->prev == NULL) {
49727+ subj->obj_hash[index] = match->next;
49728+ if (match->next != NULL)
49729+ match->next->prev = NULL;
49730+ } else {
49731+ match->prev->next = match->next;
49732+ if (match->next != NULL)
49733+ match->next->prev = match->prev;
49734+ }
49735+ match->prev = NULL;
49736+ match->next = NULL;
49737+ match->inode = newinode;
49738+ match->device = newdevice;
49739+ match->mode &= ~GR_DELETED;
49740+
49741+ insert_acl_obj_label(match, subj);
49742+ }
49743+
49744+ return;
49745+}
49746+
49747+static void
49748+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
49749+ const ino_t newinode, const dev_t newdevice,
49750+ struct acl_role_label *role)
49751+{
49752+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
49753+ struct acl_subject_label *match;
49754+
49755+ match = role->subj_hash[index];
49756+
49757+ while (match && (match->inode != oldinode ||
49758+ match->device != olddevice ||
49759+ !(match->mode & GR_DELETED)))
49760+ match = match->next;
49761+
49762+ if (match && (match->inode == oldinode)
49763+ && (match->device == olddevice)
49764+ && (match->mode & GR_DELETED)) {
49765+ if (match->prev == NULL) {
49766+ role->subj_hash[index] = match->next;
49767+ if (match->next != NULL)
49768+ match->next->prev = NULL;
49769+ } else {
49770+ match->prev->next = match->next;
49771+ if (match->next != NULL)
49772+ match->next->prev = match->prev;
49773+ }
49774+ match->prev = NULL;
49775+ match->next = NULL;
49776+ match->inode = newinode;
49777+ match->device = newdevice;
49778+ match->mode &= ~GR_DELETED;
49779+
49780+ insert_acl_subj_label(match, role);
49781+ }
49782+
49783+ return;
49784+}
49785+
49786+static void
49787+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
49788+ const ino_t newinode, const dev_t newdevice)
49789+{
49790+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
49791+ struct inodev_entry *match;
49792+
49793+ match = inodev_set.i_hash[index];
49794+
49795+ while (match && (match->nentry->inode != oldinode ||
49796+ match->nentry->device != olddevice || !match->nentry->deleted))
49797+ match = match->next;
49798+
49799+ if (match && (match->nentry->inode == oldinode)
49800+ && (match->nentry->device == olddevice) &&
49801+ match->nentry->deleted) {
49802+ if (match->prev == NULL) {
49803+ inodev_set.i_hash[index] = match->next;
49804+ if (match->next != NULL)
49805+ match->next->prev = NULL;
49806+ } else {
49807+ match->prev->next = match->next;
49808+ if (match->next != NULL)
49809+ match->next->prev = match->prev;
49810+ }
49811+ match->prev = NULL;
49812+ match->next = NULL;
49813+ match->nentry->inode = newinode;
49814+ match->nentry->device = newdevice;
49815+ match->nentry->deleted = 0;
49816+
49817+ insert_inodev_entry(match);
49818+ }
49819+
49820+ return;
49821+}
49822+
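/* Illustrative sketch (not part of the patch): the three update_* helpers
 * above share one pattern -- find the hash entry that still carries the old
 * inode/device pair and the deleted flag, unlink it from its doubly-linked
 * chain, rewrite its key, clear the flag, and rehash it under the new key.
 * The simplified, hypothetical types below model only that pattern; they are
 * not the structures the patch uses.
 */
struct sk_entry {
	unsigned long key;
	int deleted;
	struct sk_entry *prev, *next;
};

struct sk_table {
	struct sk_entry **buckets;
	unsigned int size;
};

static unsigned int sk_hash(unsigned long key, unsigned int size)
{
	return key % size;
}

static void sk_rekey(struct sk_table *t, unsigned long oldkey, unsigned long newkey)
{
	unsigned int idx = sk_hash(oldkey, t->size);
	struct sk_entry *e = t->buckets[idx];

	/* only a still-deleted entry under the old key is eligible */
	while (e && (e->key != oldkey || !e->deleted))
		e = e->next;
	if (e == NULL)
		return;

	/* unlink from the old chain */
	if (e->prev)
		e->prev->next = e->next;
	else
		t->buckets[idx] = e->next;
	if (e->next)
		e->next->prev = e->prev;
	e->prev = e->next = NULL;

	/* relabel, then reinsert at the head of the new chain */
	e->key = newkey;
	e->deleted = 0;
	idx = sk_hash(newkey, t->size);
	e->next = t->buckets[idx];
	if (e->next)
		e->next->prev = e;
	t->buckets[idx] = e;
}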
49823+static void
49824+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
49825+ const struct vfsmount *mnt)
49826+{
49827+ struct acl_subject_label *subj;
49828+ struct acl_role_label *role;
49829+ unsigned int x;
49830+ ino_t inode = dentry->d_inode->i_ino;
49831+ dev_t dev = __get_dev(dentry);
49832+
49833+ FOR_EACH_ROLE_START(role)
49834+ update_acl_subj_label(matchn->inode, matchn->device,
49835+ inode, dev, role);
49836+
49837+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49838+ if ((subj->inode == inode) && (subj->device == dev)) {
49839+ subj->inode = inode;
49840+ subj->device = dev;
49841+ }
49842+ FOR_EACH_NESTED_SUBJECT_END(subj)
49843+ FOR_EACH_SUBJECT_START(role, subj, x)
49844+ update_acl_obj_label(matchn->inode, matchn->device,
49845+ inode, dev, subj);
49846+ FOR_EACH_SUBJECT_END(subj,x)
49847+ FOR_EACH_ROLE_END(role)
49848+
49849+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
49850+
49851+ return;
49852+}
49853+
49854+void
49855+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49856+{
49857+ struct name_entry *matchn;
49858+
49859+ if (unlikely(!(gr_status & GR_READY)))
49860+ return;
49861+
49862+ preempt_disable();
49863+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
49864+
49865+ if (unlikely((unsigned long)matchn)) {
49866+ write_lock(&gr_inode_lock);
49867+ do_handle_create(matchn, dentry, mnt);
49868+ write_unlock(&gr_inode_lock);
49869+ }
49870+ preempt_enable();
49871+
49872+ return;
49873+}
49874+
49875+void
49876+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49877+ struct dentry *old_dentry,
49878+ struct dentry *new_dentry,
49879+ struct vfsmount *mnt, const __u8 replace)
49880+{
49881+ struct name_entry *matchn;
49882+ struct inodev_entry *inodev;
49883+ ino_t oldinode = old_dentry->d_inode->i_ino;
49884+ dev_t olddev = __get_dev(old_dentry);
49885+
49886+	/* vfs_rename swaps the name and parent link for old_dentry and
49887+	   new_dentry.
49888+	   At this point, old_dentry has the new name, parent link, and inode
49889+	   for the renamed file.
49890+	   If a file is being replaced by the rename, new_dentry has the inode
49891+	   and name for the replaced file.
49892+	*/
49893+
49894+ if (unlikely(!(gr_status & GR_READY)))
49895+ return;
49896+
49897+ preempt_disable();
49898+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
49899+
49900+ /* we wouldn't have to check d_inode if it weren't for
49901+ NFS silly-renaming
49902+ */
49903+
49904+ write_lock(&gr_inode_lock);
49905+ if (unlikely(replace && new_dentry->d_inode)) {
49906+ ino_t newinode = new_dentry->d_inode->i_ino;
49907+ dev_t newdev = __get_dev(new_dentry);
49908+ inodev = lookup_inodev_entry(newinode, newdev);
49909+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
49910+ do_handle_delete(inodev, newinode, newdev);
49911+ }
49912+
49913+ inodev = lookup_inodev_entry(oldinode, olddev);
49914+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
49915+ do_handle_delete(inodev, oldinode, olddev);
49916+
49917+ if (unlikely((unsigned long)matchn))
49918+ do_handle_create(matchn, old_dentry, mnt);
49919+
49920+ write_unlock(&gr_inode_lock);
49921+ preempt_enable();
49922+
49923+ return;
49924+}
49925+
49926+static int
49927+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
49928+ unsigned char **sum)
49929+{
49930+ struct acl_role_label *r;
49931+ struct role_allowed_ip *ipp;
49932+ struct role_transition *trans;
49933+ unsigned int i;
49934+ int found = 0;
49935+ u32 curr_ip = current->signal->curr_ip;
49936+
49937+ current->signal->saved_ip = curr_ip;
49938+
49939+ /* check transition table */
49940+
49941+ for (trans = current->role->transitions; trans; trans = trans->next) {
49942+ if (!strcmp(rolename, trans->rolename)) {
49943+ found = 1;
49944+ break;
49945+ }
49946+ }
49947+
49948+ if (!found)
49949+ return 0;
49950+
49951+ /* handle special roles that do not require authentication
49952+ and check ip */
49953+
49954+ FOR_EACH_ROLE_START(r)
49955+ if (!strcmp(rolename, r->rolename) &&
49956+ (r->roletype & GR_ROLE_SPECIAL)) {
49957+ found = 0;
49958+ if (r->allowed_ips != NULL) {
49959+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
49960+ if ((ntohl(curr_ip) & ipp->netmask) ==
49961+ (ntohl(ipp->addr) & ipp->netmask))
49962+ found = 1;
49963+ }
49964+ } else
49965+ found = 2;
49966+ if (!found)
49967+ return 0;
49968+
49969+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
49970+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
49971+ *salt = NULL;
49972+ *sum = NULL;
49973+ return 1;
49974+ }
49975+ }
49976+ FOR_EACH_ROLE_END(r)
49977+
49978+ for (i = 0; i < num_sprole_pws; i++) {
49979+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
49980+ *salt = acl_special_roles[i]->salt;
49981+ *sum = acl_special_roles[i]->sum;
49982+ return 1;
49983+ }
49984+ }
49985+
49986+ return 0;
49987+}
49988+
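/* Illustrative sketch (not part of the patch): the allowed-IP test in
 * lookup_special_role_auth() reduces both the client address and each
 * configured address to its network prefix and compares the prefixes.  The
 * hypothetical helper below shows the same test, assuming -- as the ntohl()
 * calls above suggest -- that the mask is stored in host byte order while
 * the addresses are in network byte order.
 */
#include <stdint.h>
#include <arpa/inet.h>

static int ip_in_subnet(uint32_t addr_be, uint32_t allowed_be, uint32_t mask_host)
{
	/* e.g. mask_host == 0xffffff00 admits any address in allowed_be's /24 */
	return (ntohl(addr_be) & mask_host) == (ntohl(allowed_be) & mask_host);
}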
49989+static void
49990+assign_special_role(char *rolename)
49991+{
49992+ struct acl_object_label *obj;
49993+ struct acl_role_label *r;
49994+ struct acl_role_label *assigned = NULL;
49995+ struct task_struct *tsk;
49996+ struct file *filp;
49997+
49998+ FOR_EACH_ROLE_START(r)
49999+ if (!strcmp(rolename, r->rolename) &&
50000+ (r->roletype & GR_ROLE_SPECIAL)) {
50001+ assigned = r;
50002+ break;
50003+ }
50004+ FOR_EACH_ROLE_END(r)
50005+
50006+ if (!assigned)
50007+ return;
50008+
50009+ read_lock(&tasklist_lock);
50010+ read_lock(&grsec_exec_file_lock);
50011+
50012+ tsk = current->real_parent;
50013+ if (tsk == NULL)
50014+ goto out_unlock;
50015+
50016+ filp = tsk->exec_file;
50017+ if (filp == NULL)
50018+ goto out_unlock;
50019+
50020+ tsk->is_writable = 0;
50021+
50022+ tsk->acl_sp_role = 1;
50023+ tsk->acl_role_id = ++acl_sp_role_value;
50024+ tsk->role = assigned;
50025+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50026+
50027+ /* ignore additional mmap checks for processes that are writable
50028+ by the default ACL */
50029+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50030+ if (unlikely(obj->mode & GR_WRITE))
50031+ tsk->is_writable = 1;
50032+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50033+ if (unlikely(obj->mode & GR_WRITE))
50034+ tsk->is_writable = 1;
50035+
50036+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50037+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50038+#endif
50039+
50040+out_unlock:
50041+ read_unlock(&grsec_exec_file_lock);
50042+ read_unlock(&tasklist_lock);
50043+ return;
50044+}
50045+
50046+int gr_check_secure_terminal(struct task_struct *task)
50047+{
50048+ struct task_struct *p, *p2, *p3;
50049+ struct files_struct *files;
50050+ struct fdtable *fdt;
50051+ struct file *our_file = NULL, *file;
50052+ int i;
50053+
50054+ if (task->signal->tty == NULL)
50055+ return 1;
50056+
50057+ files = get_files_struct(task);
50058+ if (files != NULL) {
50059+ rcu_read_lock();
50060+ fdt = files_fdtable(files);
50061+ for (i=0; i < fdt->max_fds; i++) {
50062+ file = fcheck_files(files, i);
50063+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50064+ get_file(file);
50065+ our_file = file;
50066+ }
50067+ }
50068+ rcu_read_unlock();
50069+ put_files_struct(files);
50070+ }
50071+
50072+ if (our_file == NULL)
50073+ return 1;
50074+
50075+ read_lock(&tasklist_lock);
50076+ do_each_thread(p2, p) {
50077+ files = get_files_struct(p);
50078+ if (files == NULL ||
50079+ (p->signal && p->signal->tty == task->signal->tty)) {
50080+ if (files != NULL)
50081+ put_files_struct(files);
50082+ continue;
50083+ }
50084+ rcu_read_lock();
50085+ fdt = files_fdtable(files);
50086+ for (i=0; i < fdt->max_fds; i++) {
50087+ file = fcheck_files(files, i);
50088+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50089+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50090+ p3 = task;
50091+ while (p3->pid > 0) {
50092+ if (p3 == p)
50093+ break;
50094+ p3 = p3->real_parent;
50095+ }
50096+ if (p3 == p)
50097+ break;
50098+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50099+ gr_handle_alertkill(p);
50100+ rcu_read_unlock();
50101+ put_files_struct(files);
50102+ read_unlock(&tasklist_lock);
50103+ fput(our_file);
50104+ return 0;
50105+ }
50106+ }
50107+ rcu_read_unlock();
50108+ put_files_struct(files);
50109+ } while_each_thread(p2, p);
50110+ read_unlock(&tasklist_lock);
50111+
50112+ fput(our_file);
50113+ return 1;
50114+}
50115+
50116+ssize_t
50117+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50118+{
50119+ struct gr_arg_wrapper uwrap;
50120+ unsigned char *sprole_salt = NULL;
50121+ unsigned char *sprole_sum = NULL;
50122+ int error = sizeof (struct gr_arg_wrapper);
50123+ int error2 = 0;
50124+
50125+ mutex_lock(&gr_dev_mutex);
50126+
50127+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50128+ error = -EPERM;
50129+ goto out;
50130+ }
50131+
50132+ if (count != sizeof (struct gr_arg_wrapper)) {
50133+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50134+ error = -EINVAL;
50135+ goto out;
50136+ }
50137+
50138+
50139+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50140+ gr_auth_expires = 0;
50141+ gr_auth_attempts = 0;
50142+ }
50143+
50144+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50145+ error = -EFAULT;
50146+ goto out;
50147+ }
50148+
50149+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50150+ error = -EINVAL;
50151+ goto out;
50152+ }
50153+
50154+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50155+ error = -EFAULT;
50156+ goto out;
50157+ }
50158+
50159+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50160+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50161+ time_after(gr_auth_expires, get_seconds())) {
50162+ error = -EBUSY;
50163+ goto out;
50164+ }
50165+
50166+	/* if a non-root user is trying to do anything other than use a special role,
50167+	   do not attempt authentication and do not count it towards the authentication
50168+	   lockout
50169+	 */
50170+
50171+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50172+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50173+ current_uid()) {
50174+ error = -EPERM;
50175+ goto out;
50176+ }
50177+
50178+ /* ensure pw and special role name are null terminated */
50179+
50180+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50181+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50182+
50183+	/* Okay.
50184+	 * We now have enough of the argument structure (we have yet
50185+	 * to copy_from_user the tables themselves). Copy the tables
50186+	 * only if we need them, i.e. for loading operations. */
50187+
50188+ switch (gr_usermode->mode) {
50189+ case GR_STATUS:
50190+ if (gr_status & GR_READY) {
50191+ error = 1;
50192+ if (!gr_check_secure_terminal(current))
50193+ error = 3;
50194+ } else
50195+ error = 2;
50196+ goto out;
50197+ case GR_SHUTDOWN:
50198+ if ((gr_status & GR_READY)
50199+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50200+ pax_open_kernel();
50201+ gr_status &= ~GR_READY;
50202+ pax_close_kernel();
50203+
50204+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50205+ free_variables();
50206+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50207+ memset(gr_system_salt, 0, GR_SALT_LEN);
50208+ memset(gr_system_sum, 0, GR_SHA_LEN);
50209+ } else if (gr_status & GR_READY) {
50210+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50211+ error = -EPERM;
50212+ } else {
50213+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50214+ error = -EAGAIN;
50215+ }
50216+ break;
50217+ case GR_ENABLE:
50218+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50219+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50220+ else {
50221+ if (gr_status & GR_READY)
50222+ error = -EAGAIN;
50223+ else
50224+ error = error2;
50225+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50226+ }
50227+ break;
50228+ case GR_RELOAD:
50229+ if (!(gr_status & GR_READY)) {
50230+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50231+ error = -EAGAIN;
50232+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50233+ lock_kernel();
50234+
50235+ pax_open_kernel();
50236+ gr_status &= ~GR_READY;
50237+ pax_close_kernel();
50238+
50239+ free_variables();
50240+ if (!(error2 = gracl_init(gr_usermode))) {
50241+ unlock_kernel();
50242+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50243+ } else {
50244+ unlock_kernel();
50245+ error = error2;
50246+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50247+ }
50248+ } else {
50249+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50250+ error = -EPERM;
50251+ }
50252+ break;
50253+ case GR_SEGVMOD:
50254+ if (unlikely(!(gr_status & GR_READY))) {
50255+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50256+ error = -EAGAIN;
50257+ break;
50258+ }
50259+
50260+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50261+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50262+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50263+ struct acl_subject_label *segvacl;
50264+ segvacl =
50265+ lookup_acl_subj_label(gr_usermode->segv_inode,
50266+ gr_usermode->segv_device,
50267+ current->role);
50268+ if (segvacl) {
50269+ segvacl->crashes = 0;
50270+ segvacl->expires = 0;
50271+ }
50272+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50273+ gr_remove_uid(gr_usermode->segv_uid);
50274+ }
50275+ } else {
50276+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50277+ error = -EPERM;
50278+ }
50279+ break;
50280+ case GR_SPROLE:
50281+ case GR_SPROLEPAM:
50282+ if (unlikely(!(gr_status & GR_READY))) {
50283+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50284+ error = -EAGAIN;
50285+ break;
50286+ }
50287+
50288+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50289+ current->role->expires = 0;
50290+ current->role->auth_attempts = 0;
50291+ }
50292+
50293+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50294+ time_after(current->role->expires, get_seconds())) {
50295+ error = -EBUSY;
50296+ goto out;
50297+ }
50298+
50299+ if (lookup_special_role_auth
50300+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50301+ && ((!sprole_salt && !sprole_sum)
50302+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50303+ char *p = "";
50304+ assign_special_role(gr_usermode->sp_role);
50305+ read_lock(&tasklist_lock);
50306+ if (current->real_parent)
50307+ p = current->real_parent->role->rolename;
50308+ read_unlock(&tasklist_lock);
50309+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50310+ p, acl_sp_role_value);
50311+ } else {
50312+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50313+ error = -EPERM;
50314+ if(!(current->role->auth_attempts++))
50315+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50316+
50317+ goto out;
50318+ }
50319+ break;
50320+ case GR_UNSPROLE:
50321+ if (unlikely(!(gr_status & GR_READY))) {
50322+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50323+ error = -EAGAIN;
50324+ break;
50325+ }
50326+
50327+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50328+ char *p = "";
50329+ int i = 0;
50330+
50331+ read_lock(&tasklist_lock);
50332+ if (current->real_parent) {
50333+ p = current->real_parent->role->rolename;
50334+ i = current->real_parent->acl_role_id;
50335+ }
50336+ read_unlock(&tasklist_lock);
50337+
50338+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50339+ gr_set_acls(1);
50340+ } else {
50341+ error = -EPERM;
50342+ goto out;
50343+ }
50344+ break;
50345+ default:
50346+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50347+ error = -EINVAL;
50348+ break;
50349+ }
50350+
50351+ if (error != -EPERM)
50352+ goto out;
50353+
50354+ if(!(gr_auth_attempts++))
50355+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50356+
50357+ out:
50358+ mutex_unlock(&gr_dev_mutex);
50359+ return error;
50360+}
50361+
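/* Illustrative sketch (not part of the patch): write_grsec_handler() keeps a
 * global failure counter for the system password and arms the lockout window
 * only on the first failure; special-role failures feed the per-role counter
 * instead, and -EBUSY is answered at the top of the next request while the
 * window is still open.  The condensed, hypothetical helper below models that
 * bookkeeping for a single counter.
 */
#include <errno.h>

static unsigned long auth_attempts;
static unsigned long auth_expires;

/* returns nonzero once enough failures have accumulated inside the window */
static int note_auth_result(int error, unsigned long now,
			    unsigned long max_tries, unsigned long timeout)
{
	if (auth_expires && now >= auth_expires) {
		/* the window elapsed: forgive the old failures */
		auth_expires = 0;
		auth_attempts = 0;
	}

	if (error != -EPERM)
		return 0;

	if (!auth_attempts++)
		auth_expires = now + timeout;

	return auth_attempts >= max_tries;
}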
50362+/* must be called with
50363+ rcu_read_lock();
50364+ read_lock(&tasklist_lock);
50365+ read_lock(&grsec_exec_file_lock);
50366+*/
50367+int gr_apply_subject_to_task(struct task_struct *task)
50368+{
50369+ struct acl_object_label *obj;
50370+ char *tmpname;
50371+ struct acl_subject_label *tmpsubj;
50372+ struct file *filp;
50373+ struct name_entry *nmatch;
50374+
50375+ filp = task->exec_file;
50376+ if (filp == NULL)
50377+ return 0;
50378+
50379+	/* the following applies the correct subject
50380+	   to binaries that were already running when the RBAC system
50381+	   was enabled and that have been
50382+	   replaced or deleted since their execution
50383+	   -----
50384+	   when the RBAC system starts, the inode/dev
50385+	   from exec_file will be one that the RBAC system
50386+	   is unaware of.  It only knows the inode/dev
50387+	   of the file currently present on disk, or the absence
50388+	   of it.
50389+	*/
50390+ preempt_disable();
50391+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50392+
50393+ nmatch = lookup_name_entry(tmpname);
50394+ preempt_enable();
50395+ tmpsubj = NULL;
50396+ if (nmatch) {
50397+ if (nmatch->deleted)
50398+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50399+ else
50400+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50401+ if (tmpsubj != NULL)
50402+ task->acl = tmpsubj;
50403+ }
50404+ if (tmpsubj == NULL)
50405+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50406+ task->role);
50407+ if (task->acl) {
50408+ task->is_writable = 0;
50409+ /* ignore additional mmap checks for processes that are writable
50410+ by the default ACL */
50411+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50412+ if (unlikely(obj->mode & GR_WRITE))
50413+ task->is_writable = 1;
50414+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50415+ if (unlikely(obj->mode & GR_WRITE))
50416+ task->is_writable = 1;
50417+
50418+ gr_set_proc_res(task);
50419+
50420+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50421+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50422+#endif
50423+ } else {
50424+ return 1;
50425+ }
50426+
50427+ return 0;
50428+}
50429+
50430+int
50431+gr_set_acls(const int type)
50432+{
50433+ struct task_struct *task, *task2;
50434+ struct acl_role_label *role = current->role;
50435+ __u16 acl_role_id = current->acl_role_id;
50436+ const struct cred *cred;
50437+ int ret;
50438+
50439+ rcu_read_lock();
50440+ read_lock(&tasklist_lock);
50441+ read_lock(&grsec_exec_file_lock);
50442+ do_each_thread(task2, task) {
50443+ /* check to see if we're called from the exit handler,
50444+ if so, only replace ACLs that have inherited the admin
50445+ ACL */
50446+
50447+ if (type && (task->role != role ||
50448+ task->acl_role_id != acl_role_id))
50449+ continue;
50450+
50451+ task->acl_role_id = 0;
50452+ task->acl_sp_role = 0;
50453+
50454+ if (task->exec_file) {
50455+ cred = __task_cred(task);
50456+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50457+
50458+ ret = gr_apply_subject_to_task(task);
50459+ if (ret) {
50460+ read_unlock(&grsec_exec_file_lock);
50461+ read_unlock(&tasklist_lock);
50462+ rcu_read_unlock();
50463+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50464+ return ret;
50465+ }
50466+ } else {
50467+ // it's a kernel process
50468+ task->role = kernel_role;
50469+ task->acl = kernel_role->root_label;
50470+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50471+ task->acl->mode &= ~GR_PROCFIND;
50472+#endif
50473+ }
50474+ } while_each_thread(task2, task);
50475+ read_unlock(&grsec_exec_file_lock);
50476+ read_unlock(&tasklist_lock);
50477+ rcu_read_unlock();
50478+
50479+ return 0;
50480+}
50481+
50482+void
50483+gr_learn_resource(const struct task_struct *task,
50484+ const int res, const unsigned long wanted, const int gt)
50485+{
50486+ struct acl_subject_label *acl;
50487+ const struct cred *cred;
50488+
50489+ if (unlikely((gr_status & GR_READY) &&
50490+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50491+ goto skip_reslog;
50492+
50493+#ifdef CONFIG_GRKERNSEC_RESLOG
50494+ gr_log_resource(task, res, wanted, gt);
50495+#endif
50496+ skip_reslog:
50497+
50498+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50499+ return;
50500+
50501+ acl = task->acl;
50502+
50503+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50504+ !(acl->resmask & (1 << (unsigned short) res))))
50505+ return;
50506+
50507+ if (wanted >= acl->res[res].rlim_cur) {
50508+ unsigned long res_add;
50509+
50510+ res_add = wanted;
50511+ switch (res) {
50512+ case RLIMIT_CPU:
50513+ res_add += GR_RLIM_CPU_BUMP;
50514+ break;
50515+ case RLIMIT_FSIZE:
50516+ res_add += GR_RLIM_FSIZE_BUMP;
50517+ break;
50518+ case RLIMIT_DATA:
50519+ res_add += GR_RLIM_DATA_BUMP;
50520+ break;
50521+ case RLIMIT_STACK:
50522+ res_add += GR_RLIM_STACK_BUMP;
50523+ break;
50524+ case RLIMIT_CORE:
50525+ res_add += GR_RLIM_CORE_BUMP;
50526+ break;
50527+ case RLIMIT_RSS:
50528+ res_add += GR_RLIM_RSS_BUMP;
50529+ break;
50530+ case RLIMIT_NPROC:
50531+ res_add += GR_RLIM_NPROC_BUMP;
50532+ break;
50533+ case RLIMIT_NOFILE:
50534+ res_add += GR_RLIM_NOFILE_BUMP;
50535+ break;
50536+ case RLIMIT_MEMLOCK:
50537+ res_add += GR_RLIM_MEMLOCK_BUMP;
50538+ break;
50539+ case RLIMIT_AS:
50540+ res_add += GR_RLIM_AS_BUMP;
50541+ break;
50542+ case RLIMIT_LOCKS:
50543+ res_add += GR_RLIM_LOCKS_BUMP;
50544+ break;
50545+ case RLIMIT_SIGPENDING:
50546+ res_add += GR_RLIM_SIGPENDING_BUMP;
50547+ break;
50548+ case RLIMIT_MSGQUEUE:
50549+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50550+ break;
50551+ case RLIMIT_NICE:
50552+ res_add += GR_RLIM_NICE_BUMP;
50553+ break;
50554+ case RLIMIT_RTPRIO:
50555+ res_add += GR_RLIM_RTPRIO_BUMP;
50556+ break;
50557+ case RLIMIT_RTTIME:
50558+ res_add += GR_RLIM_RTTIME_BUMP;
50559+ break;
50560+ }
50561+
50562+ acl->res[res].rlim_cur = res_add;
50563+
50564+ if (wanted > acl->res[res].rlim_max)
50565+ acl->res[res].rlim_max = res_add;
50566+
50567+ /* only log the subject filename, since resource logging is supported for
50568+ single-subject learning only */
50569+ rcu_read_lock();
50570+ cred = __task_cred(task);
50571+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50572+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50573+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50574+ "", (unsigned long) res, &task->signal->saved_ip);
50575+ rcu_read_unlock();
50576+ }
50577+
50578+ return;
50579+}
50580+
50581+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50582+void
50583+pax_set_initial_flags(struct linux_binprm *bprm)
50584+{
50585+ struct task_struct *task = current;
50586+ struct acl_subject_label *proc;
50587+ unsigned long flags;
50588+
50589+ if (unlikely(!(gr_status & GR_READY)))
50590+ return;
50591+
50592+ flags = pax_get_flags(task);
50593+
50594+ proc = task->acl;
50595+
50596+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50597+ flags &= ~MF_PAX_PAGEEXEC;
50598+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50599+ flags &= ~MF_PAX_SEGMEXEC;
50600+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50601+ flags &= ~MF_PAX_RANDMMAP;
50602+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50603+ flags &= ~MF_PAX_EMUTRAMP;
50604+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50605+ flags &= ~MF_PAX_MPROTECT;
50606+
50607+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50608+ flags |= MF_PAX_PAGEEXEC;
50609+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50610+ flags |= MF_PAX_SEGMEXEC;
50611+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50612+ flags |= MF_PAX_RANDMMAP;
50613+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50614+ flags |= MF_PAX_EMUTRAMP;
50615+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50616+ flags |= MF_PAX_MPROTECT;
50617+
50618+ pax_set_flags(task, flags);
50619+
50620+ return;
50621+}
50622+#endif
50623+
50624+#ifdef CONFIG_SYSCTL
50625+/* Eric Biederman likes breaking userland ABI and every inode-based security
50626+ system to save 35kb of memory */
50627+
50628+/* we modify the passed in filename, but adjust it back before returning */
50629+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50630+{
50631+ struct name_entry *nmatch;
50632+ char *p, *lastp = NULL;
50633+ struct acl_object_label *obj = NULL, *tmp;
50634+ struct acl_subject_label *tmpsubj;
50635+ char c = '\0';
50636+
50637+ read_lock(&gr_inode_lock);
50638+
50639+ p = name + len - 1;
50640+ do {
50641+ nmatch = lookup_name_entry(name);
50642+ if (lastp != NULL)
50643+ *lastp = c;
50644+
50645+ if (nmatch == NULL)
50646+ goto next_component;
50647+ tmpsubj = current->acl;
50648+ do {
50649+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50650+ if (obj != NULL) {
50651+ tmp = obj->globbed;
50652+ while (tmp) {
50653+ if (!glob_match(tmp->filename, name)) {
50654+ obj = tmp;
50655+ goto found_obj;
50656+ }
50657+ tmp = tmp->next;
50658+ }
50659+ goto found_obj;
50660+ }
50661+ } while ((tmpsubj = tmpsubj->parent_subject));
50662+next_component:
50663+ /* end case */
50664+ if (p == name)
50665+ break;
50666+
50667+ while (*p != '/')
50668+ p--;
50669+ if (p == name)
50670+ lastp = p + 1;
50671+ else {
50672+ lastp = p;
50673+ p--;
50674+ }
50675+ c = *lastp;
50676+ *lastp = '\0';
50677+ } while (1);
50678+found_obj:
50679+ read_unlock(&gr_inode_lock);
50680+ /* obj returned will always be non-null */
50681+ return obj;
50682+}
50683+
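/* Illustrative sketch (not part of the patch): gr_lookup_by_name() above
 * performs a longest-prefix match over path components by repeatedly
 * truncating the string at the last '/' and retrying the lookup, restoring
 * the clobbered byte each time.  The standalone helper below shows the same
 * idea against a hypothetical lookup() callback and assumes an absolute
 * path, just as the caller does.
 */
#include <string.h>

typedef const void *(*path_lookup_fn)(const char *name);

static const void *longest_prefix_match(char *name, path_lookup_fn lookup)
{
	char *p = name + strlen(name) - 1;
	char *cut = NULL;
	char saved = '\0';
	const void *obj;

	for (;;) {
		obj = lookup(name);
		if (cut)
			*cut = saved;	/* restore the byte we truncated */
		if (obj)
			return obj;
		if (p == name)
			return NULL;	/* even "/" had no match */
		while (*p != '/')
			p--;
		cut = (p == name) ? p + 1 : p;	/* keep "/" itself matchable */
		if (p != name)
			p--;
		saved = *cut;
		*cut = '\0';
	}
}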
50684+/* returns 0 when allowing, non-zero on error
50685+ op of 0 is used for readdir, so we don't log the names of hidden files
50686+*/
50687+__u32
50688+gr_handle_sysctl(const struct ctl_table *table, const int op)
50689+{
50690+ ctl_table *tmp;
50691+ const char *proc_sys = "/proc/sys";
50692+ char *path;
50693+ struct acl_object_label *obj;
50694+ unsigned short len = 0, pos = 0, depth = 0, i;
50695+ __u32 err = 0;
50696+ __u32 mode = 0;
50697+
50698+ if (unlikely(!(gr_status & GR_READY)))
50699+ return 0;
50700+
50701+	/* for now, ignore operations on non-sysctl entries if it's not a
50702+	   readdir */
50703+ if (table->child != NULL && op != 0)
50704+ return 0;
50705+
50706+ mode |= GR_FIND;
50707+ /* it's only a read if it's an entry, read on dirs is for readdir */
50708+ if (op & MAY_READ)
50709+ mode |= GR_READ;
50710+ if (op & MAY_WRITE)
50711+ mode |= GR_WRITE;
50712+
50713+ preempt_disable();
50714+
50715+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50716+
50717+ /* it's only a read/write if it's an actual entry, not a dir
50718+ (which are opened for readdir)
50719+ */
50720+
50721+ /* convert the requested sysctl entry into a pathname */
50722+
50723+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50724+ len += strlen(tmp->procname);
50725+ len++;
50726+ depth++;
50727+ }
50728+
50729+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
50730+ /* deny */
50731+ goto out;
50732+ }
50733+
50734+ memset(path, 0, PAGE_SIZE);
50735+
50736+ memcpy(path, proc_sys, strlen(proc_sys));
50737+
50738+ pos += strlen(proc_sys);
50739+
50740+ for (; depth > 0; depth--) {
50741+ path[pos] = '/';
50742+ pos++;
50743+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
50744+ if (depth == i) {
50745+ memcpy(path + pos, tmp->procname,
50746+ strlen(tmp->procname));
50747+ pos += strlen(tmp->procname);
50748+ }
50749+ i++;
50750+ }
50751+ }
50752+
50753+ obj = gr_lookup_by_name(path, pos);
50754+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
50755+
50756+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
50757+ ((err & mode) != mode))) {
50758+ __u32 new_mode = mode;
50759+
50760+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50761+
50762+ err = 0;
50763+ gr_log_learn_sysctl(path, new_mode);
50764+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
50765+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
50766+ err = -ENOENT;
50767+ } else if (!(err & GR_FIND)) {
50768+ err = -ENOENT;
50769+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
50770+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
50771+ path, (mode & GR_READ) ? " reading" : "",
50772+ (mode & GR_WRITE) ? " writing" : "");
50773+ err = -EACCES;
50774+ } else if ((err & mode) != mode) {
50775+ err = -EACCES;
50776+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
50777+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
50778+ path, (mode & GR_READ) ? " reading" : "",
50779+ (mode & GR_WRITE) ? " writing" : "");
50780+ err = 0;
50781+ } else
50782+ err = 0;
50783+
50784+ out:
50785+ preempt_enable();
50786+
50787+ return err;
50788+}
50789+#endif
50790+
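/* Illustrative sketch (not part of the patch): gr_handle_sysctl() rebuilds
 * the "/proc/sys/..." pathname by measuring the parent chain and then
 * emitting components outermost-first with a nested loop.  The recursive
 * helper below produces the same root-to-leaf ordering more directly;
 * struct sysctl_node and its fields are hypothetical stand-ins for the
 * ctl_table procname/parent fields used above.
 */
#include <stdio.h>

struct sysctl_node {
	const char *name;
	struct sysctl_node *parent;
};

/* builds the path root-first into buf (truncating at len); returns the running offset */
static int emit_path(const struct sysctl_node *node, char *buf, int len)
{
	int pos;

	if (node->parent)
		pos = emit_path(node->parent, buf, len);
	else
		pos = snprintf(buf, len, "/proc/sys");

	if (pos >= 0 && pos < len)
		pos += snprintf(buf + pos, len - pos, "/%s", node->name);
	return pos;
}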
50791+int
50792+gr_handle_proc_ptrace(struct task_struct *task)
50793+{
50794+ struct file *filp;
50795+ struct task_struct *tmp = task;
50796+ struct task_struct *curtemp = current;
50797+ __u32 retmode;
50798+
50799+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50800+ if (unlikely(!(gr_status & GR_READY)))
50801+ return 0;
50802+#endif
50803+
50804+ read_lock(&tasklist_lock);
50805+ read_lock(&grsec_exec_file_lock);
50806+ filp = task->exec_file;
50807+
50808+ while (tmp->pid > 0) {
50809+ if (tmp == curtemp)
50810+ break;
50811+ tmp = tmp->real_parent;
50812+ }
50813+
50814+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50815+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
50816+ read_unlock(&grsec_exec_file_lock);
50817+ read_unlock(&tasklist_lock);
50818+ return 1;
50819+ }
50820+
50821+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50822+ if (!(gr_status & GR_READY)) {
50823+ read_unlock(&grsec_exec_file_lock);
50824+ read_unlock(&tasklist_lock);
50825+ return 0;
50826+ }
50827+#endif
50828+
50829+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
50830+ read_unlock(&grsec_exec_file_lock);
50831+ read_unlock(&tasklist_lock);
50832+
50833+ if (retmode & GR_NOPTRACE)
50834+ return 1;
50835+
50836+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
50837+ && (current->acl != task->acl || (current->acl != current->role->root_label
50838+ && current->pid != task->pid)))
50839+ return 1;
50840+
50841+ return 0;
50842+}
50843+
50844+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
50845+{
50846+ if (unlikely(!(gr_status & GR_READY)))
50847+ return;
50848+
50849+ if (!(current->role->roletype & GR_ROLE_GOD))
50850+ return;
50851+
50852+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
50853+ p->role->rolename, gr_task_roletype_to_char(p),
50854+ p->acl->filename);
50855+}
50856+
50857+int
50858+gr_handle_ptrace(struct task_struct *task, const long request)
50859+{
50860+ struct task_struct *tmp = task;
50861+ struct task_struct *curtemp = current;
50862+ __u32 retmode;
50863+
50864+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
50865+ if (unlikely(!(gr_status & GR_READY)))
50866+ return 0;
50867+#endif
50868+
50869+ read_lock(&tasklist_lock);
50870+ while (tmp->pid > 0) {
50871+ if (tmp == curtemp)
50872+ break;
50873+ tmp = tmp->real_parent;
50874+ }
50875+
50876+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
50877+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
50878+ read_unlock(&tasklist_lock);
50879+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50880+ return 1;
50881+ }
50882+ read_unlock(&tasklist_lock);
50883+
50884+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50885+ if (!(gr_status & GR_READY))
50886+ return 0;
50887+#endif
50888+
50889+ read_lock(&grsec_exec_file_lock);
50890+ if (unlikely(!task->exec_file)) {
50891+ read_unlock(&grsec_exec_file_lock);
50892+ return 0;
50893+ }
50894+
50895+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
50896+ read_unlock(&grsec_exec_file_lock);
50897+
50898+ if (retmode & GR_NOPTRACE) {
50899+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50900+ return 1;
50901+ }
50902+
50903+ if (retmode & GR_PTRACERD) {
50904+ switch (request) {
50905+ case PTRACE_POKETEXT:
50906+ case PTRACE_POKEDATA:
50907+ case PTRACE_POKEUSR:
50908+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
50909+ case PTRACE_SETREGS:
50910+ case PTRACE_SETFPREGS:
50911+#endif
50912+#ifdef CONFIG_X86
50913+ case PTRACE_SETFPXREGS:
50914+#endif
50915+#ifdef CONFIG_ALTIVEC
50916+ case PTRACE_SETVRREGS:
50917+#endif
50918+ return 1;
50919+ default:
50920+ return 0;
50921+ }
50922+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
50923+ !(current->role->roletype & GR_ROLE_GOD) &&
50924+ (current->acl != task->acl)) {
50925+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
50926+ return 1;
50927+ }
50928+
50929+ return 0;
50930+}
50931+
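/* Illustrative sketch (not part of the patch): gr_handle_proc_ptrace(),
 * gr_handle_ptrace() and gr_check_secure_terminal() all decide whether one
 * task is an ancestor of another by walking real_parent until a task with
 * pid 0 is reached.  The hypothetical struct below models just that walk;
 * in the patch the walk is done under tasklist_lock.
 */
struct toy_task {
	int pid;
	struct toy_task *real_parent;
};

/* returns 1 if "ancestor" appears on child's real_parent chain (child included) */
static int is_ancestor(const struct toy_task *child, const struct toy_task *ancestor)
{
	const struct toy_task *t = child;

	while (t->pid > 0) {
		if (t == ancestor)
			return 1;
		t = t->real_parent;
	}
	return t == ancestor;
}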
50932+static int is_writable_mmap(const struct file *filp)
50933+{
50934+ struct task_struct *task = current;
50935+ struct acl_object_label *obj, *obj2;
50936+
50937+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
50938+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
50939+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50940+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
50941+ task->role->root_label);
50942+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
50943+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
50944+ return 1;
50945+ }
50946+ }
50947+ return 0;
50948+}
50949+
50950+int
50951+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
50952+{
50953+ __u32 mode;
50954+
50955+ if (unlikely(!file || !(prot & PROT_EXEC)))
50956+ return 1;
50957+
50958+ if (is_writable_mmap(file))
50959+ return 0;
50960+
50961+ mode =
50962+ gr_search_file(file->f_path.dentry,
50963+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50964+ file->f_path.mnt);
50965+
50966+ if (!gr_tpe_allow(file))
50967+ return 0;
50968+
50969+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
50970+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50971+ return 0;
50972+ } else if (unlikely(!(mode & GR_EXEC))) {
50973+ return 0;
50974+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
50975+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
50976+ return 1;
50977+ }
50978+
50979+ return 1;
50980+}
50981+
50982+int
50983+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50984+{
50985+ __u32 mode;
50986+
50987+ if (unlikely(!file || !(prot & PROT_EXEC)))
50988+ return 1;
50989+
50990+ if (is_writable_mmap(file))
50991+ return 0;
50992+
50993+ mode =
50994+ gr_search_file(file->f_path.dentry,
50995+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
50996+ file->f_path.mnt);
50997+
50998+ if (!gr_tpe_allow(file))
50999+ return 0;
51000+
51001+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51002+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51003+ return 0;
51004+ } else if (unlikely(!(mode & GR_EXEC))) {
51005+ return 0;
51006+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51007+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51008+ return 1;
51009+ }
51010+
51011+ return 1;
51012+}
51013+
51014+void
51015+gr_acl_handle_psacct(struct task_struct *task, const long code)
51016+{
51017+ unsigned long runtime;
51018+ unsigned long cputime;
51019+ unsigned int wday, cday;
51020+ __u8 whr, chr;
51021+ __u8 wmin, cmin;
51022+ __u8 wsec, csec;
51023+ struct timespec timeval;
51024+
51025+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51026+ !(task->acl->mode & GR_PROCACCT)))
51027+ return;
51028+
51029+ do_posix_clock_monotonic_gettime(&timeval);
51030+ runtime = timeval.tv_sec - task->start_time.tv_sec;
51031+ wday = runtime / (3600 * 24);
51032+ runtime -= wday * (3600 * 24);
51033+ whr = runtime / 3600;
51034+ runtime -= whr * 3600;
51035+ wmin = runtime / 60;
51036+ runtime -= wmin * 60;
51037+ wsec = runtime;
51038+
51039+ cputime = (task->utime + task->stime) / HZ;
51040+ cday = cputime / (3600 * 24);
51041+ cputime -= cday * (3600 * 24);
51042+ chr = cputime / 3600;
51043+ cputime -= chr * 3600;
51044+ cmin = cputime / 60;
51045+ cputime -= cmin * 60;
51046+ csec = cputime;
51047+
51048+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51049+
51050+ return;
51051+}
51052+
51053+void gr_set_kernel_label(struct task_struct *task)
51054+{
51055+ if (gr_status & GR_READY) {
51056+ task->role = kernel_role;
51057+ task->acl = kernel_role->root_label;
51058+ }
51059+ return;
51060+}
51061+
51062+#ifdef CONFIG_TASKSTATS
51063+int gr_is_taskstats_denied(int pid)
51064+{
51065+ struct task_struct *task;
51066+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51067+ const struct cred *cred;
51068+#endif
51069+ int ret = 0;
51070+
51071+ /* restrict taskstats viewing to un-chrooted root users
51072+ who have the 'view' subject flag if the RBAC system is enabled
51073+ */
51074+
51075+ rcu_read_lock();
51076+ read_lock(&tasklist_lock);
51077+ task = find_task_by_vpid(pid);
51078+ if (task) {
51079+#ifdef CONFIG_GRKERNSEC_CHROOT
51080+ if (proc_is_chrooted(task))
51081+ ret = -EACCES;
51082+#endif
51083+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51084+ cred = __task_cred(task);
51085+#ifdef CONFIG_GRKERNSEC_PROC_USER
51086+ if (cred->uid != 0)
51087+ ret = -EACCES;
51088+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51089+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51090+ ret = -EACCES;
51091+#endif
51092+#endif
51093+ if (gr_status & GR_READY) {
51094+ if (!(task->acl->mode & GR_VIEW))
51095+ ret = -EACCES;
51096+ }
51097+ } else
51098+ ret = -ENOENT;
51099+
51100+ read_unlock(&tasklist_lock);
51101+ rcu_read_unlock();
51102+
51103+ return ret;
51104+}
51105+#endif
51106+
51107+/* AUXV entries are filled via a descendant of search_binary_handler
51108+ after we've already applied the subject for the target
51109+*/
51110+int gr_acl_enable_at_secure(void)
51111+{
51112+ if (unlikely(!(gr_status & GR_READY)))
51113+ return 0;
51114+
51115+ if (current->acl->mode & GR_ATSECURE)
51116+ return 1;
51117+
51118+ return 0;
51119+}
51120+
51121+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51122+{
51123+ struct task_struct *task = current;
51124+ struct dentry *dentry = file->f_path.dentry;
51125+ struct vfsmount *mnt = file->f_path.mnt;
51126+ struct acl_object_label *obj, *tmp;
51127+ struct acl_subject_label *subj;
51128+ unsigned int bufsize;
51129+ int is_not_root;
51130+ char *path;
51131+ dev_t dev = __get_dev(dentry);
51132+
51133+ if (unlikely(!(gr_status & GR_READY)))
51134+ return 1;
51135+
51136+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51137+ return 1;
51138+
51139+ /* ignore Eric Biederman */
51140+ if (IS_PRIVATE(dentry->d_inode))
51141+ return 1;
51142+
51143+ subj = task->acl;
51144+ do {
51145+ obj = lookup_acl_obj_label(ino, dev, subj);
51146+ if (obj != NULL)
51147+ return (obj->mode & GR_FIND) ? 1 : 0;
51148+ } while ((subj = subj->parent_subject));
51149+
51150+	/* this is purely an optimization, since we're looking for an object
51151+	   for the directory we're doing a readdir on.
51152+	   If it's possible for any globbed object to match the entry we're
51153+	   filling into the directory, then the object we find here will be
51154+	   an anchor point with attached globbed objects.
51155+	*/
51156+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51157+ if (obj->globbed == NULL)
51158+ return (obj->mode & GR_FIND) ? 1 : 0;
51159+
51160+ is_not_root = ((obj->filename[0] == '/') &&
51161+ (obj->filename[1] == '\0')) ? 0 : 1;
51162+ bufsize = PAGE_SIZE - namelen - is_not_root;
51163+
51164+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
51165+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51166+ return 1;
51167+
51168+ preempt_disable();
51169+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51170+ bufsize);
51171+
51172+ bufsize = strlen(path);
51173+
51174+ /* if base is "/", don't append an additional slash */
51175+ if (is_not_root)
51176+ *(path + bufsize) = '/';
51177+ memcpy(path + bufsize + is_not_root, name, namelen);
51178+ *(path + bufsize + namelen + is_not_root) = '\0';
51179+
51180+ tmp = obj->globbed;
51181+ while (tmp) {
51182+ if (!glob_match(tmp->filename, path)) {
51183+ preempt_enable();
51184+ return (tmp->mode & GR_FIND) ? 1 : 0;
51185+ }
51186+ tmp = tmp->next;
51187+ }
51188+ preempt_enable();
51189+ return (obj->mode & GR_FIND) ? 1 : 0;
51190+}
51191+
51192+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51193+EXPORT_SYMBOL(gr_acl_is_enabled);
51194+#endif
51195+EXPORT_SYMBOL(gr_learn_resource);
51196+EXPORT_SYMBOL(gr_set_kernel_label);
51197+#ifdef CONFIG_SECURITY
51198+EXPORT_SYMBOL(gr_check_user_change);
51199+EXPORT_SYMBOL(gr_check_group_change);
51200+#endif
51201+
51202diff -urNp linux-2.6.32.45/grsecurity/gracl_cap.c linux-2.6.32.45/grsecurity/gracl_cap.c
51203--- linux-2.6.32.45/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51204+++ linux-2.6.32.45/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51205@@ -0,0 +1,138 @@
51206+#include <linux/kernel.h>
51207+#include <linux/module.h>
51208+#include <linux/sched.h>
51209+#include <linux/gracl.h>
51210+#include <linux/grsecurity.h>
51211+#include <linux/grinternal.h>
51212+
51213+static const char *captab_log[] = {
51214+ "CAP_CHOWN",
51215+ "CAP_DAC_OVERRIDE",
51216+ "CAP_DAC_READ_SEARCH",
51217+ "CAP_FOWNER",
51218+ "CAP_FSETID",
51219+ "CAP_KILL",
51220+ "CAP_SETGID",
51221+ "CAP_SETUID",
51222+ "CAP_SETPCAP",
51223+ "CAP_LINUX_IMMUTABLE",
51224+ "CAP_NET_BIND_SERVICE",
51225+ "CAP_NET_BROADCAST",
51226+ "CAP_NET_ADMIN",
51227+ "CAP_NET_RAW",
51228+ "CAP_IPC_LOCK",
51229+ "CAP_IPC_OWNER",
51230+ "CAP_SYS_MODULE",
51231+ "CAP_SYS_RAWIO",
51232+ "CAP_SYS_CHROOT",
51233+ "CAP_SYS_PTRACE",
51234+ "CAP_SYS_PACCT",
51235+ "CAP_SYS_ADMIN",
51236+ "CAP_SYS_BOOT",
51237+ "CAP_SYS_NICE",
51238+ "CAP_SYS_RESOURCE",
51239+ "CAP_SYS_TIME",
51240+ "CAP_SYS_TTY_CONFIG",
51241+ "CAP_MKNOD",
51242+ "CAP_LEASE",
51243+ "CAP_AUDIT_WRITE",
51244+ "CAP_AUDIT_CONTROL",
51245+ "CAP_SETFCAP",
51246+ "CAP_MAC_OVERRIDE",
51247+ "CAP_MAC_ADMIN"
51248+};
51249+
51250+EXPORT_SYMBOL(gr_is_capable);
51251+EXPORT_SYMBOL(gr_is_capable_nolog);
51252+
51253+int
51254+gr_is_capable(const int cap)
51255+{
51256+ struct task_struct *task = current;
51257+ const struct cred *cred = current_cred();
51258+ struct acl_subject_label *curracl;
51259+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51260+ kernel_cap_t cap_audit = __cap_empty_set;
51261+
51262+ if (!gr_acl_is_enabled())
51263+ return 1;
51264+
51265+ curracl = task->acl;
51266+
51267+ cap_drop = curracl->cap_lower;
51268+ cap_mask = curracl->cap_mask;
51269+ cap_audit = curracl->cap_invert_audit;
51270+
51271+ while ((curracl = curracl->parent_subject)) {
51272+ /* if the cap isn't specified in the current computed mask but is specified in the
51273+ current level subject, and is lowered in the current level subject, then add
51274+ it to the set of dropped capabilities
51275+ otherwise, add the current level subject's mask to the current computed mask
51276+ */
51277+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51278+ cap_raise(cap_mask, cap);
51279+ if (cap_raised(curracl->cap_lower, cap))
51280+ cap_raise(cap_drop, cap);
51281+ if (cap_raised(curracl->cap_invert_audit, cap))
51282+ cap_raise(cap_audit, cap);
51283+ }
51284+ }
51285+
51286+ if (!cap_raised(cap_drop, cap)) {
51287+ if (cap_raised(cap_audit, cap))
51288+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51289+ return 1;
51290+ }
51291+
51292+ curracl = task->acl;
51293+
51294+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51295+ && cap_raised(cred->cap_effective, cap)) {
51296+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51297+ task->role->roletype, cred->uid,
51298+ cred->gid, task->exec_file ?
51299+ gr_to_filename(task->exec_file->f_path.dentry,
51300+ task->exec_file->f_path.mnt) : curracl->filename,
51301+ curracl->filename, 0UL,
51302+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51303+ return 1;
51304+ }
51305+
51306+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51307+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51308+ return 0;
51309+}
51310+
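/* Illustrative sketch (not part of the patch): gr_is_capable() composes the
 * effective dropped-capability set by walking parent subjects -- the subject
 * nearest the leaf that mentions a capability in its cap_mask decides it,
 * and only that subject's cap_lower determines whether the capability is
 * dropped.  The plain-bitmask model below (hypothetical arrays ordered leaf
 * first, standing in for kernel_cap_t) shows that precedence rule for a
 * single capability bit.
 */
static int cap_allowed(unsigned long cap_bit,
		       const unsigned long *masks,	/* cap_mask per level */
		       const unsigned long *lowers,	/* cap_lower per level */
		       unsigned int depth)
{
	unsigned long mask, drop;
	unsigned int i;

	if (depth == 0)
		return 1;

	/* the leaf subject always contributes its own sets */
	mask = masks[0];
	drop = lowers[0];

	for (i = 1; i < depth; i++) {
		/* a parent only decides caps the levels below it left unmentioned */
		if (!(mask & cap_bit) && (masks[i] & cap_bit)) {
			mask |= cap_bit;
			if (lowers[i] & cap_bit)
				drop |= cap_bit;
		}
	}

	return !(drop & cap_bit);
}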
51311+int
51312+gr_is_capable_nolog(const int cap)
51313+{
51314+ struct acl_subject_label *curracl;
51315+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51316+
51317+ if (!gr_acl_is_enabled())
51318+ return 1;
51319+
51320+ curracl = current->acl;
51321+
51322+ cap_drop = curracl->cap_lower;
51323+ cap_mask = curracl->cap_mask;
51324+
51325+ while ((curracl = curracl->parent_subject)) {
51326+ /* if the cap isn't specified in the current computed mask but is specified in the
51327+ current level subject, and is lowered in the current level subject, then add
51328+ it to the set of dropped capabilities
51329+ otherwise, add the current level subject's mask to the current computed mask
51330+ */
51331+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51332+ cap_raise(cap_mask, cap);
51333+ if (cap_raised(curracl->cap_lower, cap))
51334+ cap_raise(cap_drop, cap);
51335+ }
51336+ }
51337+
51338+ if (!cap_raised(cap_drop, cap))
51339+ return 1;
51340+
51341+ return 0;
51342+}
51343+
51344diff -urNp linux-2.6.32.45/grsecurity/gracl_fs.c linux-2.6.32.45/grsecurity/gracl_fs.c
51345--- linux-2.6.32.45/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51346+++ linux-2.6.32.45/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51347@@ -0,0 +1,431 @@
51348+#include <linux/kernel.h>
51349+#include <linux/sched.h>
51350+#include <linux/types.h>
51351+#include <linux/fs.h>
51352+#include <linux/file.h>
51353+#include <linux/stat.h>
51354+#include <linux/grsecurity.h>
51355+#include <linux/grinternal.h>
51356+#include <linux/gracl.h>
51357+
51358+__u32
51359+gr_acl_handle_hidden_file(const struct dentry * dentry,
51360+ const struct vfsmount * mnt)
51361+{
51362+ __u32 mode;
51363+
51364+ if (unlikely(!dentry->d_inode))
51365+ return GR_FIND;
51366+
51367+ mode =
51368+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51369+
51370+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51371+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51372+ return mode;
51373+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51374+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51375+ return 0;
51376+ } else if (unlikely(!(mode & GR_FIND)))
51377+ return 0;
51378+
51379+ return GR_FIND;
51380+}
51381+
51382+__u32
51383+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51384+ const int fmode)
51385+{
51386+ __u32 reqmode = GR_FIND;
51387+ __u32 mode;
51388+
51389+ if (unlikely(!dentry->d_inode))
51390+ return reqmode;
51391+
51392+ if (unlikely(fmode & O_APPEND))
51393+ reqmode |= GR_APPEND;
51394+ else if (unlikely(fmode & FMODE_WRITE))
51395+ reqmode |= GR_WRITE;
51396+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51397+ reqmode |= GR_READ;
51398+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51399+ reqmode &= ~GR_READ;
51400+ mode =
51401+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51402+ mnt);
51403+
51404+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51405+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51406+ reqmode & GR_READ ? " reading" : "",
51407+ reqmode & GR_WRITE ? " writing" : reqmode &
51408+ GR_APPEND ? " appending" : "");
51409+ return reqmode;
51410+ } else
51411+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51412+ {
51413+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51414+ reqmode & GR_READ ? " reading" : "",
51415+ reqmode & GR_WRITE ? " writing" : reqmode &
51416+ GR_APPEND ? " appending" : "");
51417+ return 0;
51418+ } else if (unlikely((mode & reqmode) != reqmode))
51419+ return 0;
51420+
51421+ return reqmode;
51422+}
51423+
51424+__u32
51425+gr_acl_handle_creat(const struct dentry * dentry,
51426+ const struct dentry * p_dentry,
51427+ const struct vfsmount * p_mnt, const int fmode,
51428+ const int imode)
51429+{
51430+ __u32 reqmode = GR_WRITE | GR_CREATE;
51431+ __u32 mode;
51432+
51433+ if (unlikely(fmode & O_APPEND))
51434+ reqmode |= GR_APPEND;
51435+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51436+ reqmode |= GR_READ;
51437+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51438+ reqmode |= GR_SETID;
51439+
51440+ mode =
51441+ gr_check_create(dentry, p_dentry, p_mnt,
51442+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51443+
51444+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51445+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51446+ reqmode & GR_READ ? " reading" : "",
51447+ reqmode & GR_WRITE ? " writing" : reqmode &
51448+ GR_APPEND ? " appending" : "");
51449+ return reqmode;
51450+ } else
51451+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51452+ {
51453+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51454+ reqmode & GR_READ ? " reading" : "",
51455+ reqmode & GR_WRITE ? " writing" : reqmode &
51456+ GR_APPEND ? " appending" : "");
51457+ return 0;
51458+ } else if (unlikely((mode & reqmode) != reqmode))
51459+ return 0;
51460+
51461+ return reqmode;
51462+}
51463+
51464+__u32
51465+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51466+ const int fmode)
51467+{
51468+ __u32 mode, reqmode = GR_FIND;
51469+
51470+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51471+ reqmode |= GR_EXEC;
51472+ if (fmode & S_IWOTH)
51473+ reqmode |= GR_WRITE;
51474+ if (fmode & S_IROTH)
51475+ reqmode |= GR_READ;
51476+
51477+ mode =
51478+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51479+ mnt);
51480+
51481+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51482+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51483+ reqmode & GR_READ ? " reading" : "",
51484+ reqmode & GR_WRITE ? " writing" : "",
51485+ reqmode & GR_EXEC ? " executing" : "");
51486+ return reqmode;
51487+ } else
51488+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51489+ {
51490+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51491+ reqmode & GR_READ ? " reading" : "",
51492+ reqmode & GR_WRITE ? " writing" : "",
51493+ reqmode & GR_EXEC ? " executing" : "");
51494+ return 0;
51495+ } else if (unlikely((mode & reqmode) != reqmode))
51496+ return 0;
51497+
51498+ return reqmode;
51499+}
51500+
51501+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51502+{
51503+ __u32 mode;
51504+
51505+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51506+
51507+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51508+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51509+ return mode;
51510+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51511+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51512+ return 0;
51513+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51514+ return 0;
51515+
51516+ return (reqmode);
51517+}
51518+
51519+__u32
51520+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51521+{
51522+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51523+}
51524+
51525+__u32
51526+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51527+{
51528+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51529+}
51530+
51531+__u32
51532+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51533+{
51534+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51535+}
51536+
51537+__u32
51538+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51539+{
51540+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51541+}
51542+
51543+__u32
51544+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51545+ mode_t mode)
51546+{
51547+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51548+ return 1;
51549+
51550+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51551+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51552+ GR_FCHMOD_ACL_MSG);
51553+ } else {
51554+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51555+ }
51556+}
51557+
51558+__u32
51559+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51560+ mode_t mode)
51561+{
51562+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51563+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51564+ GR_CHMOD_ACL_MSG);
51565+ } else {
51566+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51567+ }
51568+}
51569+
51570+__u32
51571+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51572+{
51573+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51574+}
51575+
51576+__u32
51577+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51578+{
51579+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51580+}
51581+
51582+__u32
51583+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51584+{
51585+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51586+}
51587+
51588+__u32
51589+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51590+{
51591+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51592+ GR_UNIXCONNECT_ACL_MSG);
51593+}
51594+
51595+/* hardlinks require at minimum create permission,
51596+ any additional privilege required is based on the
51597+ privilege of the file being linked to
51598+*/
51599+__u32
51600+gr_acl_handle_link(const struct dentry * new_dentry,
51601+ const struct dentry * parent_dentry,
51602+ const struct vfsmount * parent_mnt,
51603+ const struct dentry * old_dentry,
51604+ const struct vfsmount * old_mnt, const char *to)
51605+{
51606+ __u32 mode;
51607+ __u32 needmode = GR_CREATE | GR_LINK;
51608+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51609+
51610+ mode =
51611+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51612+ old_mnt);
51613+
51614+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51615+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51616+ return mode;
51617+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51618+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51619+ return 0;
51620+ } else if (unlikely((mode & needmode) != needmode))
51621+ return 0;
51622+
51623+ return 1;
51624+}
51625+
51626+__u32
51627+gr_acl_handle_symlink(const struct dentry * new_dentry,
51628+ const struct dentry * parent_dentry,
51629+ const struct vfsmount * parent_mnt, const char *from)
51630+{
51631+ __u32 needmode = GR_WRITE | GR_CREATE;
51632+ __u32 mode;
51633+
51634+ mode =
51635+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51636+ GR_CREATE | GR_AUDIT_CREATE |
51637+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51638+
51639+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51640+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51641+ return mode;
51642+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51643+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51644+ return 0;
51645+ } else if (unlikely((mode & needmode) != needmode))
51646+ return 0;
51647+
51648+ return (GR_WRITE | GR_CREATE);
51649+}
51650+
51651+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51652+{
51653+ __u32 mode;
51654+
51655+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51656+
51657+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51658+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51659+ return mode;
51660+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51661+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51662+ return 0;
51663+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51664+ return 0;
51665+
51666+ return (reqmode);
51667+}
51668+
51669+__u32
51670+gr_acl_handle_mknod(const struct dentry * new_dentry,
51671+ const struct dentry * parent_dentry,
51672+ const struct vfsmount * parent_mnt,
51673+ const int mode)
51674+{
51675+ __u32 reqmode = GR_WRITE | GR_CREATE;
51676+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51677+ reqmode |= GR_SETID;
51678+
51679+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51680+ reqmode, GR_MKNOD_ACL_MSG);
51681+}
51682+
51683+__u32
51684+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51685+ const struct dentry *parent_dentry,
51686+ const struct vfsmount *parent_mnt)
51687+{
51688+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51689+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51690+}
51691+
51692+#define RENAME_CHECK_SUCCESS(old, new) \
51693+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51694+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51695+
51696+int
51697+gr_acl_handle_rename(struct dentry *new_dentry,
51698+ struct dentry *parent_dentry,
51699+ const struct vfsmount *parent_mnt,
51700+ struct dentry *old_dentry,
51701+ struct inode *old_parent_inode,
51702+ struct vfsmount *old_mnt, const char *newname)
51703+{
51704+ __u32 comp1, comp2;
51705+ int error = 0;
51706+
51707+ if (unlikely(!gr_acl_is_enabled()))
51708+ return 0;
51709+
51710+ if (!new_dentry->d_inode) {
51711+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
51712+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
51713+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
51714+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
51715+ GR_DELETE | GR_AUDIT_DELETE |
51716+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51717+ GR_SUPPRESS, old_mnt);
51718+ } else {
51719+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
51720+ GR_CREATE | GR_DELETE |
51721+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
51722+ GR_AUDIT_READ | GR_AUDIT_WRITE |
51723+ GR_SUPPRESS, parent_mnt);
51724+ comp2 =
51725+ gr_search_file(old_dentry,
51726+ GR_READ | GR_WRITE | GR_AUDIT_READ |
51727+ GR_DELETE | GR_AUDIT_DELETE |
51728+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
51729+ }
51730+
51731+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
51732+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
51733+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51734+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
51735+ && !(comp2 & GR_SUPPRESS)) {
51736+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
51737+ error = -EACCES;
51738+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
51739+ error = -EACCES;
51740+
51741+ return error;
51742+}
51743+
51744+void
51745+gr_acl_handle_exit(void)
51746+{
51747+ u16 id;
51748+ char *rolename;
51749+ struct file *exec_file;
51750+
51751+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
51752+ !(current->role->roletype & GR_ROLE_PERSIST))) {
51753+ id = current->acl_role_id;
51754+ rolename = current->role->rolename;
51755+ gr_set_acls(1);
51756+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
51757+ }
51758+
51759+ write_lock(&grsec_exec_file_lock);
51760+ exec_file = current->exec_file;
51761+ current->exec_file = NULL;
51762+ write_unlock(&grsec_exec_file_lock);
51763+
51764+ if (exec_file)
51765+ fput(exec_file);
51766+}
51767+
51768+int
51769+gr_acl_handle_procpidmem(const struct task_struct *task)
51770+{
51771+ if (unlikely(!gr_acl_is_enabled()))
51772+ return 0;
51773+
51774+ if (task != current && task->acl->mode & GR_PROTPROCFD)
51775+ return -EACCES;
51776+
51777+ return 0;
51778+}
51779diff -urNp linux-2.6.32.45/grsecurity/gracl_ip.c linux-2.6.32.45/grsecurity/gracl_ip.c
51780--- linux-2.6.32.45/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
51781+++ linux-2.6.32.45/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
51782@@ -0,0 +1,382 @@
51783+#include <linux/kernel.h>
51784+#include <asm/uaccess.h>
51785+#include <asm/errno.h>
51786+#include <net/sock.h>
51787+#include <linux/file.h>
51788+#include <linux/fs.h>
51789+#include <linux/net.h>
51790+#include <linux/in.h>
51791+#include <linux/skbuff.h>
51792+#include <linux/ip.h>
51793+#include <linux/udp.h>
51794+#include <linux/smp_lock.h>
51795+#include <linux/types.h>
51796+#include <linux/sched.h>
51797+#include <linux/netdevice.h>
51798+#include <linux/inetdevice.h>
51799+#include <linux/gracl.h>
51800+#include <linux/grsecurity.h>
51801+#include <linux/grinternal.h>
51802+
51803+#define GR_BIND 0x01
51804+#define GR_CONNECT 0x02
51805+#define GR_INVERT 0x04
51806+#define GR_BINDOVERRIDE 0x08
51807+#define GR_CONNECTOVERRIDE 0x10
51808+#define GR_SOCK_FAMILY 0x20
51809+
51810+static const char * gr_protocols[IPPROTO_MAX] = {
51811+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
51812+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
51813+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
51814+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
51815+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
51816+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
51817+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
51818+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
51819+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
51820+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
51821+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
51822+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
51823+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
51824+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
51825+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
51826+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
51827+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
51828+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
51829+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
51830+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
51831+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
51832+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
51833+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
51834+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
51835+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
51836+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
51837+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
51838+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
51839+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
51840+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
51841+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
51842+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
51843+ };
51844+
51845+static const char * gr_socktypes[SOCK_MAX] = {
51846+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
51847+ "unknown:7", "unknown:8", "unknown:9", "packet"
51848+ };
51849+
51850+static const char * gr_sockfamilies[AF_MAX+1] = {
51851+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
51852+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
51853+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
51854+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
51855+ };
51856+
51857+const char *
51858+gr_proto_to_name(unsigned char proto)
51859+{
51860+ return gr_protocols[proto];
51861+}
51862+
51863+const char *
51864+gr_socktype_to_name(unsigned char type)
51865+{
51866+ return gr_socktypes[type];
51867+}
51868+
51869+const char *
51870+gr_sockfamily_to_name(unsigned char family)
51871+{
51872+ return gr_sockfamilies[family];
51873+}
51874+
51875+int
51876+gr_search_socket(const int domain, const int type, const int protocol)
51877+{
51878+ struct acl_subject_label *curr;
51879+ const struct cred *cred = current_cred();
51880+
51881+ if (unlikely(!gr_acl_is_enabled()))
51882+ goto exit;
51883+
51884+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
51885+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
51886+ goto exit; // let the kernel handle it
51887+
51888+ curr = current->acl;
51889+
51890+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
51891+ /* the family is allowed, if this is PF_INET allow it only if
51892+ the extra sock type/protocol checks pass */
51893+ if (domain == PF_INET)
51894+ goto inet_check;
51895+ goto exit;
51896+ } else {
51897+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51898+ __u32 fakeip = 0;
51899+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51900+ current->role->roletype, cred->uid,
51901+ cred->gid, current->exec_file ?
51902+ gr_to_filename(current->exec_file->f_path.dentry,
51903+ current->exec_file->f_path.mnt) :
51904+ curr->filename, curr->filename,
51905+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
51906+ &current->signal->saved_ip);
51907+ goto exit;
51908+ }
51909+ goto exit_fail;
51910+ }
51911+
51912+inet_check:
51913+ /* the rest of this checking is for IPv4 only */
51914+ if (!curr->ips)
51915+ goto exit;
51916+
51917+ if ((curr->ip_type & (1 << type)) &&
51918+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
51919+ goto exit;
51920+
51921+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
51922+ /* we don't place acls on raw sockets, and sometimes
51923+ dgram/ip sockets are opened for ioctl and not
51924+ bind/connect, so we'll fake a bind learn log */
51925+ if (type == SOCK_RAW || type == SOCK_PACKET) {
51926+ __u32 fakeip = 0;
51927+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51928+ current->role->roletype, cred->uid,
51929+ cred->gid, current->exec_file ?
51930+ gr_to_filename(current->exec_file->f_path.dentry,
51931+ current->exec_file->f_path.mnt) :
51932+ curr->filename, curr->filename,
51933+ &fakeip, 0, type,
51934+ protocol, GR_CONNECT, &current->signal->saved_ip);
51935+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
51936+ __u32 fakeip = 0;
51937+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
51938+ current->role->roletype, cred->uid,
51939+ cred->gid, current->exec_file ?
51940+ gr_to_filename(current->exec_file->f_path.dentry,
51941+ current->exec_file->f_path.mnt) :
51942+ curr->filename, curr->filename,
51943+ &fakeip, 0, type,
51944+ protocol, GR_BIND, &current->signal->saved_ip);
51945+ }
51946+ /* we'll log when they use connect or bind */
51947+ goto exit;
51948+ }
51949+
51950+exit_fail:
51951+ if (domain == PF_INET)
51952+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
51953+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
51954+ else
51955+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
51956+ gr_socktype_to_name(type), protocol);
51957+
51958+ return 0;
51959+exit:
51960+ return 1;
51961+}
51962+
51963+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
51964+{
51965+ if ((ip->mode & mode) &&
51966+ (ip_port >= ip->low) &&
51967+ (ip_port <= ip->high) &&
51968+ ((ntohl(ip_addr) & our_netmask) ==
51969+ (ntohl(our_addr) & our_netmask))
51970+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
51971+ && (ip->type & (1 << type))) {
51972+ if (ip->mode & GR_INVERT)
51973+ return 2; // specifically denied
51974+ else
51975+ return 1; // allowed
51976+ }
51977+
51978+ return 0; // not specifically allowed, may continue parsing
51979+}
51980+
51981+static int
51982+gr_search_connectbind(const int full_mode, struct sock *sk,
51983+ struct sockaddr_in *addr, const int type)
51984+{
51985+ char iface[IFNAMSIZ] = {0};
51986+ struct acl_subject_label *curr;
51987+ struct acl_ip_label *ip;
51988+ struct inet_sock *isk;
51989+ struct net_device *dev;
51990+ struct in_device *idev;
51991+ unsigned long i;
51992+ int ret;
51993+ int mode = full_mode & (GR_BIND | GR_CONNECT);
51994+ __u32 ip_addr = 0;
51995+ __u32 our_addr;
51996+ __u32 our_netmask;
51997+ char *p;
51998+ __u16 ip_port = 0;
51999+ const struct cred *cred = current_cred();
52000+
52001+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52002+ return 0;
52003+
52004+ curr = current->acl;
52005+ isk = inet_sk(sk);
52006+
52007+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52008+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52009+ addr->sin_addr.s_addr = curr->inaddr_any_override;
52010+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52011+ struct sockaddr_in saddr;
52012+ int err;
52013+
52014+ saddr.sin_family = AF_INET;
52015+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52016+ saddr.sin_port = isk->sport;
52017+
52018+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52019+ if (err)
52020+ return err;
52021+
52022+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52023+ if (err)
52024+ return err;
52025+ }
52026+
52027+ if (!curr->ips)
52028+ return 0;
52029+
52030+ ip_addr = addr->sin_addr.s_addr;
52031+ ip_port = ntohs(addr->sin_port);
52032+
52033+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52034+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52035+ current->role->roletype, cred->uid,
52036+ cred->gid, current->exec_file ?
52037+ gr_to_filename(current->exec_file->f_path.dentry,
52038+ current->exec_file->f_path.mnt) :
52039+ curr->filename, curr->filename,
52040+ &ip_addr, ip_port, type,
52041+ sk->sk_protocol, mode, &current->signal->saved_ip);
52042+ return 0;
52043+ }
52044+
52045+ for (i = 0; i < curr->ip_num; i++) {
52046+ ip = *(curr->ips + i);
52047+ if (ip->iface != NULL) {
52048+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52049+ p = strchr(iface, ':');
52050+ if (p != NULL)
52051+ *p = '\0';
52052+ dev = dev_get_by_name(sock_net(sk), iface);
52053+ if (dev == NULL)
52054+ continue;
52055+ idev = in_dev_get(dev);
52056+ if (idev == NULL) {
52057+ dev_put(dev);
52058+ continue;
52059+ }
52060+ rcu_read_lock();
52061+ for_ifa(idev) {
52062+ if (!strcmp(ip->iface, ifa->ifa_label)) {
52063+ our_addr = ifa->ifa_address;
52064+ our_netmask = 0xffffffff;
52065+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52066+ if (ret == 1) {
52067+ rcu_read_unlock();
52068+ in_dev_put(idev);
52069+ dev_put(dev);
52070+ return 0;
52071+ } else if (ret == 2) {
52072+ rcu_read_unlock();
52073+ in_dev_put(idev);
52074+ dev_put(dev);
52075+ goto denied;
52076+ }
52077+ }
52078+ } endfor_ifa(idev);
52079+ rcu_read_unlock();
52080+ in_dev_put(idev);
52081+ dev_put(dev);
52082+ } else {
52083+ our_addr = ip->addr;
52084+ our_netmask = ip->netmask;
52085+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52086+ if (ret == 1)
52087+ return 0;
52088+ else if (ret == 2)
52089+ goto denied;
52090+ }
52091+ }
52092+
52093+denied:
52094+ if (mode == GR_BIND)
52095+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52096+ else if (mode == GR_CONNECT)
52097+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52098+
52099+ return -EACCES;
52100+}
52101+
52102+int
52103+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52104+{
52105+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52106+}
52107+
52108+int
52109+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52110+{
52111+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52112+}
52113+
52114+int gr_search_listen(struct socket *sock)
52115+{
52116+ struct sock *sk = sock->sk;
52117+ struct sockaddr_in addr;
52118+
52119+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52120+ addr.sin_port = inet_sk(sk)->sport;
52121+
52122+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52123+}
52124+
52125+int gr_search_accept(struct socket *sock)
52126+{
52127+ struct sock *sk = sock->sk;
52128+ struct sockaddr_in addr;
52129+
52130+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52131+ addr.sin_port = inet_sk(sk)->sport;
52132+
52133+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52134+}
52135+
52136+int
52137+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52138+{
52139+ if (addr)
52140+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52141+ else {
52142+ struct sockaddr_in sin;
52143+ const struct inet_sock *inet = inet_sk(sk);
52144+
52145+ sin.sin_addr.s_addr = inet->daddr;
52146+ sin.sin_port = inet->dport;
52147+
52148+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52149+ }
52150+}
52151+
52152+int
52153+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52154+{
52155+ struct sockaddr_in sin;
52156+
52157+ if (unlikely(skb->len < sizeof (struct udphdr)))
52158+ return 0; // skip this packet
52159+
52160+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52161+ sin.sin_port = udp_hdr(skb)->source;
52162+
52163+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52164+}
52165diff -urNp linux-2.6.32.45/grsecurity/gracl_learn.c linux-2.6.32.45/grsecurity/gracl_learn.c
52166--- linux-2.6.32.45/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52167+++ linux-2.6.32.45/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
52168@@ -0,0 +1,208 @@
52169+#include <linux/kernel.h>
52170+#include <linux/mm.h>
52171+#include <linux/sched.h>
52172+#include <linux/poll.h>
52173+#include <linux/smp_lock.h>
52174+#include <linux/string.h>
52175+#include <linux/file.h>
52176+#include <linux/types.h>
52177+#include <linux/vmalloc.h>
52178+#include <linux/grinternal.h>
52179+
52180+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52181+ size_t count, loff_t *ppos);
52182+extern int gr_acl_is_enabled(void);
52183+
52184+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52185+static int gr_learn_attached;
52186+
52187+/* use a 512k buffer */
52188+#define LEARN_BUFFER_SIZE (512 * 1024)
52189+
52190+static DEFINE_SPINLOCK(gr_learn_lock);
52191+static DEFINE_MUTEX(gr_learn_user_mutex);
52192+
52193+/* we need to maintain two buffers, so that the kernel context of grlearn
52194+ uses a semaphore around the userspace copying, and the other kernel contexts
52195+ use a spinlock when copying into the buffer, since they cannot sleep
52196+*/
52197+static char *learn_buffer;
52198+static char *learn_buffer_user;
52199+static int learn_buffer_len;
52200+static int learn_buffer_user_len;
52201+
52202+static ssize_t
52203+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52204+{
52205+ DECLARE_WAITQUEUE(wait, current);
52206+ ssize_t retval = 0;
52207+
52208+ add_wait_queue(&learn_wait, &wait);
52209+ set_current_state(TASK_INTERRUPTIBLE);
52210+ do {
52211+ mutex_lock(&gr_learn_user_mutex);
52212+ spin_lock(&gr_learn_lock);
52213+ if (learn_buffer_len)
52214+ break;
52215+ spin_unlock(&gr_learn_lock);
52216+ mutex_unlock(&gr_learn_user_mutex);
52217+ if (file->f_flags & O_NONBLOCK) {
52218+ retval = -EAGAIN;
52219+ goto out;
52220+ }
52221+ if (signal_pending(current)) {
52222+ retval = -ERESTARTSYS;
52223+ goto out;
52224+ }
52225+
52226+ schedule();
52227+ } while (1);
52228+
52229+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52230+ learn_buffer_user_len = learn_buffer_len;
52231+ retval = learn_buffer_len;
52232+ learn_buffer_len = 0;
52233+
52234+ spin_unlock(&gr_learn_lock);
52235+
52236+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52237+ retval = -EFAULT;
52238+
52239+ mutex_unlock(&gr_learn_user_mutex);
52240+out:
52241+ set_current_state(TASK_RUNNING);
52242+ remove_wait_queue(&learn_wait, &wait);
52243+ return retval;
52244+}
52245+
52246+static unsigned int
52247+poll_learn(struct file * file, poll_table * wait)
52248+{
52249+ poll_wait(file, &learn_wait, wait);
52250+
52251+ if (learn_buffer_len)
52252+ return (POLLIN | POLLRDNORM);
52253+
52254+ return 0;
52255+}
52256+
52257+void
52258+gr_clear_learn_entries(void)
52259+{
52260+ char *tmp;
52261+
52262+ mutex_lock(&gr_learn_user_mutex);
52263+ spin_lock(&gr_learn_lock);
52264+ tmp = learn_buffer;
52265+ learn_buffer = NULL;
52266+ spin_unlock(&gr_learn_lock);
52267+ if (tmp)
52268+ vfree(tmp);
52269+ if (learn_buffer_user != NULL) {
52270+ vfree(learn_buffer_user);
52271+ learn_buffer_user = NULL;
52272+ }
52273+ learn_buffer_len = 0;
52274+ mutex_unlock(&gr_learn_user_mutex);
52275+
52276+ return;
52277+}
52278+
52279+void
52280+gr_add_learn_entry(const char *fmt, ...)
52281+{
52282+ va_list args;
52283+ unsigned int len;
52284+
52285+ if (!gr_learn_attached)
52286+ return;
52287+
52288+ spin_lock(&gr_learn_lock);
52289+
52290+ /* leave a gap at the end so we know when it's "full" but don't have to
52291+ compute the exact length of the string we're trying to append
52292+ */
52293+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52294+ spin_unlock(&gr_learn_lock);
52295+ wake_up_interruptible(&learn_wait);
52296+ return;
52297+ }
52298+ if (learn_buffer == NULL) {
52299+ spin_unlock(&gr_learn_lock);
52300+ return;
52301+ }
52302+
52303+ va_start(args, fmt);
52304+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52305+ va_end(args);
52306+
52307+ learn_buffer_len += len + 1;
52308+
52309+ spin_unlock(&gr_learn_lock);
52310+ wake_up_interruptible(&learn_wait);
52311+
52312+ return;
52313+}
52314+
52315+static int
52316+open_learn(struct inode *inode, struct file *file)
52317+{
52318+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52319+ return -EBUSY;
52320+ if (file->f_mode & FMODE_READ) {
52321+ int retval = 0;
52322+ mutex_lock(&gr_learn_user_mutex);
52323+ if (learn_buffer == NULL)
52324+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52325+ if (learn_buffer_user == NULL)
52326+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52327+ if (learn_buffer == NULL) {
52328+ retval = -ENOMEM;
52329+ goto out_error;
52330+ }
52331+ if (learn_buffer_user == NULL) {
52332+ retval = -ENOMEM;
52333+ goto out_error;
52334+ }
52335+ learn_buffer_len = 0;
52336+ learn_buffer_user_len = 0;
52337+ gr_learn_attached = 1;
52338+out_error:
52339+ mutex_unlock(&gr_learn_user_mutex);
52340+ return retval;
52341+ }
52342+ return 0;
52343+}
52344+
52345+static int
52346+close_learn(struct inode *inode, struct file *file)
52347+{
52348+ if (file->f_mode & FMODE_READ) {
52349+ char *tmp = NULL;
52350+ mutex_lock(&gr_learn_user_mutex);
52351+ spin_lock(&gr_learn_lock);
52352+ tmp = learn_buffer;
52353+ learn_buffer = NULL;
52354+ spin_unlock(&gr_learn_lock);
52355+ if (tmp)
52356+ vfree(tmp);
52357+ if (learn_buffer_user != NULL) {
52358+ vfree(learn_buffer_user);
52359+ learn_buffer_user = NULL;
52360+ }
52361+ learn_buffer_len = 0;
52362+ learn_buffer_user_len = 0;
52363+ gr_learn_attached = 0;
52364+ mutex_unlock(&gr_learn_user_mutex);
52365+ }
52366+
52367+ return 0;
52368+}
52369+
52370+const struct file_operations grsec_fops = {
52371+ .read = read_learn,
52372+ .write = write_grsec_handler,
52373+ .open = open_learn,
52374+ .release = close_learn,
52375+ .poll = poll_learn,
52376+};
52377diff -urNp linux-2.6.32.45/grsecurity/gracl_res.c linux-2.6.32.45/grsecurity/gracl_res.c
52378--- linux-2.6.32.45/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52379+++ linux-2.6.32.45/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52380@@ -0,0 +1,67 @@
52381+#include <linux/kernel.h>
52382+#include <linux/sched.h>
52383+#include <linux/gracl.h>
52384+#include <linux/grinternal.h>
52385+
52386+static const char *restab_log[] = {
52387+ [RLIMIT_CPU] = "RLIMIT_CPU",
52388+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52389+ [RLIMIT_DATA] = "RLIMIT_DATA",
52390+ [RLIMIT_STACK] = "RLIMIT_STACK",
52391+ [RLIMIT_CORE] = "RLIMIT_CORE",
52392+ [RLIMIT_RSS] = "RLIMIT_RSS",
52393+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52394+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52395+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52396+ [RLIMIT_AS] = "RLIMIT_AS",
52397+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52398+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52399+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52400+ [RLIMIT_NICE] = "RLIMIT_NICE",
52401+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52402+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52403+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52404+};
52405+
52406+void
52407+gr_log_resource(const struct task_struct *task,
52408+ const int res, const unsigned long wanted, const int gt)
52409+{
52410+ const struct cred *cred;
52411+ unsigned long rlim;
52412+
52413+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52414+ return;
52415+
52416+ // not yet supported resource
52417+ if (unlikely(!restab_log[res]))
52418+ return;
52419+
52420+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52421+ rlim = task->signal->rlim[res].rlim_max;
52422+ else
52423+ rlim = task->signal->rlim[res].rlim_cur;
52424+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52425+ return;
52426+
52427+ rcu_read_lock();
52428+ cred = __task_cred(task);
52429+
52430+ if (res == RLIMIT_NPROC &&
52431+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52432+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52433+ goto out_rcu_unlock;
52434+ else if (res == RLIMIT_MEMLOCK &&
52435+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52436+ goto out_rcu_unlock;
52437+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52438+ goto out_rcu_unlock;
52439+ rcu_read_unlock();
52440+
52441+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52442+
52443+ return;
52444+out_rcu_unlock:
52445+ rcu_read_unlock();
52446+ return;
52447+}
52448diff -urNp linux-2.6.32.45/grsecurity/gracl_segv.c linux-2.6.32.45/grsecurity/gracl_segv.c
52449--- linux-2.6.32.45/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52450+++ linux-2.6.32.45/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52451@@ -0,0 +1,284 @@
52452+#include <linux/kernel.h>
52453+#include <linux/mm.h>
52454+#include <asm/uaccess.h>
52455+#include <asm/errno.h>
52456+#include <asm/mman.h>
52457+#include <net/sock.h>
52458+#include <linux/file.h>
52459+#include <linux/fs.h>
52460+#include <linux/net.h>
52461+#include <linux/in.h>
52462+#include <linux/smp_lock.h>
52463+#include <linux/slab.h>
52464+#include <linux/types.h>
52465+#include <linux/sched.h>
52466+#include <linux/timer.h>
52467+#include <linux/gracl.h>
52468+#include <linux/grsecurity.h>
52469+#include <linux/grinternal.h>
52470+
52471+static struct crash_uid *uid_set;
52472+static unsigned short uid_used;
52473+static DEFINE_SPINLOCK(gr_uid_lock);
52474+extern rwlock_t gr_inode_lock;
52475+extern struct acl_subject_label *
52476+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52477+ struct acl_role_label *role);
52478+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52479+
52480+int
52481+gr_init_uidset(void)
52482+{
52483+ uid_set =
52484+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52485+ uid_used = 0;
52486+
52487+ return uid_set ? 1 : 0;
52488+}
52489+
52490+void
52491+gr_free_uidset(void)
52492+{
52493+ if (uid_set)
52494+ kfree(uid_set);
52495+
52496+ return;
52497+}
52498+
52499+int
52500+gr_find_uid(const uid_t uid)
52501+{
52502+ struct crash_uid *tmp = uid_set;
52503+ uid_t buid;
52504+ int low = 0, high = uid_used - 1, mid;
52505+
52506+ while (high >= low) {
52507+ mid = (low + high) >> 1;
52508+ buid = tmp[mid].uid;
52509+ if (buid == uid)
52510+ return mid;
52511+ if (buid > uid)
52512+ high = mid - 1;
52513+ if (buid < uid)
52514+ low = mid + 1;
52515+ }
52516+
52517+ return -1;
52518+}
52519+
52520+static __inline__ void
52521+gr_insertsort(void)
52522+{
52523+ unsigned short i, j;
52524+ struct crash_uid index;
52525+
52526+ for (i = 1; i < uid_used; i++) {
52527+ index = uid_set[i];
52528+ j = i;
52529+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52530+ uid_set[j] = uid_set[j - 1];
52531+ j--;
52532+ }
52533+ uid_set[j] = index;
52534+ }
52535+
52536+ return;
52537+}
52538+
52539+static __inline__ void
52540+gr_insert_uid(const uid_t uid, const unsigned long expires)
52541+{
52542+ int loc;
52543+
52544+ if (uid_used == GR_UIDTABLE_MAX)
52545+ return;
52546+
52547+ loc = gr_find_uid(uid);
52548+
52549+ if (loc >= 0) {
52550+ uid_set[loc].expires = expires;
52551+ return;
52552+ }
52553+
52554+ uid_set[uid_used].uid = uid;
52555+ uid_set[uid_used].expires = expires;
52556+ uid_used++;
52557+
52558+ gr_insertsort();
52559+
52560+ return;
52561+}
52562+
52563+void
52564+gr_remove_uid(const unsigned short loc)
52565+{
52566+ unsigned short i;
52567+
52568+ for (i = loc + 1; i < uid_used; i++)
52569+ uid_set[i - 1] = uid_set[i];
52570+
52571+ uid_used--;
52572+
52573+ return;
52574+}
52575+
52576+int
52577+gr_check_crash_uid(const uid_t uid)
52578+{
52579+ int loc;
52580+ int ret = 0;
52581+
52582+ if (unlikely(!gr_acl_is_enabled()))
52583+ return 0;
52584+
52585+ spin_lock(&gr_uid_lock);
52586+ loc = gr_find_uid(uid);
52587+
52588+ if (loc < 0)
52589+ goto out_unlock;
52590+
52591+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52592+ gr_remove_uid(loc);
52593+ else
52594+ ret = 1;
52595+
52596+out_unlock:
52597+ spin_unlock(&gr_uid_lock);
52598+ return ret;
52599+}
52600+
52601+static __inline__ int
52602+proc_is_setxid(const struct cred *cred)
52603+{
52604+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52605+ cred->uid != cred->fsuid)
52606+ return 1;
52607+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52608+ cred->gid != cred->fsgid)
52609+ return 1;
52610+
52611+ return 0;
52612+}
52613+
52614+void
52615+gr_handle_crash(struct task_struct *task, const int sig)
52616+{
52617+ struct acl_subject_label *curr;
52618+ struct acl_subject_label *curr2;
52619+ struct task_struct *tsk, *tsk2;
52620+ const struct cred *cred;
52621+ const struct cred *cred2;
52622+
52623+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52624+ return;
52625+
52626+ if (unlikely(!gr_acl_is_enabled()))
52627+ return;
52628+
52629+ curr = task->acl;
52630+
52631+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52632+ return;
52633+
52634+ if (time_before_eq(curr->expires, get_seconds())) {
52635+ curr->expires = 0;
52636+ curr->crashes = 0;
52637+ }
52638+
52639+ curr->crashes++;
52640+
52641+ if (!curr->expires)
52642+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52643+
52644+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52645+ time_after(curr->expires, get_seconds())) {
52646+ rcu_read_lock();
52647+ cred = __task_cred(task);
52648+ if (cred->uid && proc_is_setxid(cred)) {
52649+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52650+ spin_lock(&gr_uid_lock);
52651+ gr_insert_uid(cred->uid, curr->expires);
52652+ spin_unlock(&gr_uid_lock);
52653+ curr->expires = 0;
52654+ curr->crashes = 0;
52655+ read_lock(&tasklist_lock);
52656+ do_each_thread(tsk2, tsk) {
52657+ cred2 = __task_cred(tsk);
52658+ if (tsk != task && cred2->uid == cred->uid)
52659+ gr_fake_force_sig(SIGKILL, tsk);
52660+ } while_each_thread(tsk2, tsk);
52661+ read_unlock(&tasklist_lock);
52662+ } else {
52663+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52664+ read_lock(&tasklist_lock);
52665+ do_each_thread(tsk2, tsk) {
52666+ if (likely(tsk != task)) {
52667+ curr2 = tsk->acl;
52668+
52669+ if (curr2->device == curr->device &&
52670+ curr2->inode == curr->inode)
52671+ gr_fake_force_sig(SIGKILL, tsk);
52672+ }
52673+ } while_each_thread(tsk2, tsk);
52674+ read_unlock(&tasklist_lock);
52675+ }
52676+ rcu_read_unlock();
52677+ }
52678+
52679+ return;
52680+}
52681+
52682+int
52683+gr_check_crash_exec(const struct file *filp)
52684+{
52685+ struct acl_subject_label *curr;
52686+
52687+ if (unlikely(!gr_acl_is_enabled()))
52688+ return 0;
52689+
52690+ read_lock(&gr_inode_lock);
52691+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52692+ filp->f_path.dentry->d_inode->i_sb->s_dev,
52693+ current->role);
52694+ read_unlock(&gr_inode_lock);
52695+
52696+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52697+ (!curr->crashes && !curr->expires))
52698+ return 0;
52699+
52700+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52701+ time_after(curr->expires, get_seconds()))
52702+ return 1;
52703+ else if (time_before_eq(curr->expires, get_seconds())) {
52704+ curr->crashes = 0;
52705+ curr->expires = 0;
52706+ }
52707+
52708+ return 0;
52709+}
52710+
52711+void
52712+gr_handle_alertkill(struct task_struct *task)
52713+{
52714+ struct acl_subject_label *curracl;
52715+ __u32 curr_ip;
52716+ struct task_struct *p, *p2;
52717+
52718+ if (unlikely(!gr_acl_is_enabled()))
52719+ return;
52720+
52721+ curracl = task->acl;
52722+ curr_ip = task->signal->curr_ip;
52723+
52724+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
52725+ read_lock(&tasklist_lock);
52726+ do_each_thread(p2, p) {
52727+ if (p->signal->curr_ip == curr_ip)
52728+ gr_fake_force_sig(SIGKILL, p);
52729+ } while_each_thread(p2, p);
52730+ read_unlock(&tasklist_lock);
52731+ } else if (curracl->mode & GR_KILLPROC)
52732+ gr_fake_force_sig(SIGKILL, task);
52733+
52734+ return;
52735+}
52736diff -urNp linux-2.6.32.45/grsecurity/gracl_shm.c linux-2.6.32.45/grsecurity/gracl_shm.c
52737--- linux-2.6.32.45/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
52738+++ linux-2.6.32.45/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
52739@@ -0,0 +1,40 @@
52740+#include <linux/kernel.h>
52741+#include <linux/mm.h>
52742+#include <linux/sched.h>
52743+#include <linux/file.h>
52744+#include <linux/ipc.h>
52745+#include <linux/gracl.h>
52746+#include <linux/grsecurity.h>
52747+#include <linux/grinternal.h>
52748+
52749+int
52750+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
52751+ const time_t shm_createtime, const uid_t cuid, const int shmid)
52752+{
52753+ struct task_struct *task;
52754+
52755+ if (!gr_acl_is_enabled())
52756+ return 1;
52757+
52758+ rcu_read_lock();
52759+ read_lock(&tasklist_lock);
52760+
52761+ task = find_task_by_vpid(shm_cprid);
52762+
52763+ if (unlikely(!task))
52764+ task = find_task_by_vpid(shm_lapid);
52765+
52766+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
52767+ (task->pid == shm_lapid)) &&
52768+ (task->acl->mode & GR_PROTSHM) &&
52769+ (task->acl != current->acl))) {
52770+ read_unlock(&tasklist_lock);
52771+ rcu_read_unlock();
52772+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
52773+ return 0;
52774+ }
52775+ read_unlock(&tasklist_lock);
52776+ rcu_read_unlock();
52777+
52778+ return 1;
52779+}
52780diff -urNp linux-2.6.32.45/grsecurity/grsec_chdir.c linux-2.6.32.45/grsecurity/grsec_chdir.c
52781--- linux-2.6.32.45/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
52782+++ linux-2.6.32.45/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
52783@@ -0,0 +1,19 @@
52784+#include <linux/kernel.h>
52785+#include <linux/sched.h>
52786+#include <linux/fs.h>
52787+#include <linux/file.h>
52788+#include <linux/grsecurity.h>
52789+#include <linux/grinternal.h>
52790+
52791+void
52792+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
52793+{
52794+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52795+ if ((grsec_enable_chdir && grsec_enable_group &&
52796+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
52797+ !grsec_enable_group)) {
52798+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
52799+ }
52800+#endif
52801+ return;
52802+}
52803diff -urNp linux-2.6.32.45/grsecurity/grsec_chroot.c linux-2.6.32.45/grsecurity/grsec_chroot.c
52804--- linux-2.6.32.45/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
52805+++ linux-2.6.32.45/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
52806@@ -0,0 +1,384 @@
52807+#include <linux/kernel.h>
52808+#include <linux/module.h>
52809+#include <linux/sched.h>
52810+#include <linux/file.h>
52811+#include <linux/fs.h>
52812+#include <linux/mount.h>
52813+#include <linux/types.h>
52814+#include <linux/pid_namespace.h>
52815+#include <linux/grsecurity.h>
52816+#include <linux/grinternal.h>
52817+
52818+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
52819+{
52820+#ifdef CONFIG_GRKERNSEC
52821+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
52822+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
52823+ task->gr_is_chrooted = 1;
52824+ else
52825+ task->gr_is_chrooted = 0;
52826+
52827+ task->gr_chroot_dentry = path->dentry;
52828+#endif
52829+ return;
52830+}
52831+
52832+void gr_clear_chroot_entries(struct task_struct *task)
52833+{
52834+#ifdef CONFIG_GRKERNSEC
52835+ task->gr_is_chrooted = 0;
52836+ task->gr_chroot_dentry = NULL;
52837+#endif
52838+ return;
52839+}
52840+
52841+int
52842+gr_handle_chroot_unix(const pid_t pid)
52843+{
52844+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52845+ struct task_struct *p;
52846+
52847+ if (unlikely(!grsec_enable_chroot_unix))
52848+ return 1;
52849+
52850+ if (likely(!proc_is_chrooted(current)))
52851+ return 1;
52852+
52853+ rcu_read_lock();
52854+ read_lock(&tasklist_lock);
52855+
52856+ p = find_task_by_vpid_unrestricted(pid);
52857+ if (unlikely(p && !have_same_root(current, p))) {
52858+ read_unlock(&tasklist_lock);
52859+ rcu_read_unlock();
52860+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
52861+ return 0;
52862+ }
52863+ read_unlock(&tasklist_lock);
52864+ rcu_read_unlock();
52865+#endif
52866+ return 1;
52867+}
52868+
52869+int
52870+gr_handle_chroot_nice(void)
52871+{
52872+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52873+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
52874+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
52875+ return -EPERM;
52876+ }
52877+#endif
52878+ return 0;
52879+}
52880+
52881+int
52882+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
52883+{
52884+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52885+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
52886+ && proc_is_chrooted(current)) {
52887+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
52888+ return -EACCES;
52889+ }
52890+#endif
52891+ return 0;
52892+}
52893+
52894+int
52895+gr_handle_chroot_rawio(const struct inode *inode)
52896+{
52897+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52898+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
52899+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
52900+ return 1;
52901+#endif
52902+ return 0;
52903+}
52904+
52905+int
52906+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
52907+{
52908+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52909+ struct task_struct *p;
52910+ int ret = 0;
52911+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
52912+ return ret;
52913+
52914+ read_lock(&tasklist_lock);
52915+ do_each_pid_task(pid, type, p) {
52916+ if (!have_same_root(current, p)) {
52917+ ret = 1;
52918+ goto out;
52919+ }
52920+ } while_each_pid_task(pid, type, p);
52921+out:
52922+ read_unlock(&tasklist_lock);
52923+ return ret;
52924+#endif
52925+ return 0;
52926+}
52927+
52928+int
52929+gr_pid_is_chrooted(struct task_struct *p)
52930+{
52931+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52932+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
52933+ return 0;
52934+
52935+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
52936+ !have_same_root(current, p)) {
52937+ return 1;
52938+ }
52939+#endif
52940+ return 0;
52941+}
52942+
52943+EXPORT_SYMBOL(gr_pid_is_chrooted);
52944+
52945+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
52946+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
52947+{
52948+ struct dentry *dentry = (struct dentry *)u_dentry;
52949+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
52950+ struct dentry *realroot;
52951+ struct vfsmount *realrootmnt;
52952+ struct dentry *currentroot;
52953+ struct vfsmount *currentmnt;
52954+ struct task_struct *reaper = &init_task;
52955+ int ret = 1;
52956+
52957+ read_lock(&reaper->fs->lock);
52958+ realrootmnt = mntget(reaper->fs->root.mnt);
52959+ realroot = dget(reaper->fs->root.dentry);
52960+ read_unlock(&reaper->fs->lock);
52961+
52962+ read_lock(&current->fs->lock);
52963+ currentmnt = mntget(current->fs->root.mnt);
52964+ currentroot = dget(current->fs->root.dentry);
52965+ read_unlock(&current->fs->lock);
52966+
52967+ spin_lock(&dcache_lock);
52968+ for (;;) {
52969+ if (unlikely((dentry == realroot && mnt == realrootmnt)
52970+ || (dentry == currentroot && mnt == currentmnt)))
52971+ break;
52972+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
52973+ if (mnt->mnt_parent == mnt)
52974+ break;
52975+ dentry = mnt->mnt_mountpoint;
52976+ mnt = mnt->mnt_parent;
52977+ continue;
52978+ }
52979+ dentry = dentry->d_parent;
52980+ }
52981+ spin_unlock(&dcache_lock);
52982+
52983+ dput(currentroot);
52984+ mntput(currentmnt);
52985+
52986+ /* access is outside of chroot */
52987+ if (dentry == realroot && mnt == realrootmnt)
52988+ ret = 0;
52989+
52990+ dput(realroot);
52991+ mntput(realrootmnt);
52992+ return ret;
52993+}
52994+#endif
52995+
52996+int
52997+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
52998+{
52999+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53000+ if (!grsec_enable_chroot_fchdir)
53001+ return 1;
53002+
53003+ if (!proc_is_chrooted(current))
53004+ return 1;
53005+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53006+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53007+ return 0;
53008+ }
53009+#endif
53010+ return 1;
53011+}
53012+
53013+int
53014+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53015+ const time_t shm_createtime)
53016+{
53017+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53018+ struct task_struct *p;
53019+ time_t starttime;
53020+
53021+ if (unlikely(!grsec_enable_chroot_shmat))
53022+ return 1;
53023+
53024+ if (likely(!proc_is_chrooted(current)))
53025+ return 1;
53026+
53027+ rcu_read_lock();
53028+ read_lock(&tasklist_lock);
53029+
53030+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53031+ starttime = p->start_time.tv_sec;
53032+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53033+ if (have_same_root(current, p)) {
53034+ goto allow;
53035+ } else {
53036+ read_unlock(&tasklist_lock);
53037+ rcu_read_unlock();
53038+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53039+ return 0;
53040+ }
53041+ }
53042+ /* creator exited, pid reuse, fall through to next check */
53043+ }
53044+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53045+ if (unlikely(!have_same_root(current, p))) {
53046+ read_unlock(&tasklist_lock);
53047+ rcu_read_unlock();
53048+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53049+ return 0;
53050+ }
53051+ }
53052+
53053+allow:
53054+ read_unlock(&tasklist_lock);
53055+ rcu_read_unlock();
53056+#endif
53057+ return 1;
53058+}
53059+
53060+void
53061+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53062+{
53063+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53064+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53065+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53066+#endif
53067+ return;
53068+}
53069+
53070+int
53071+gr_handle_chroot_mknod(const struct dentry *dentry,
53072+ const struct vfsmount *mnt, const int mode)
53073+{
53074+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53075+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53076+ proc_is_chrooted(current)) {
53077+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53078+ return -EPERM;
53079+ }
53080+#endif
53081+ return 0;
53082+}
53083+
53084+int
53085+gr_handle_chroot_mount(const struct dentry *dentry,
53086+ const struct vfsmount *mnt, const char *dev_name)
53087+{
53088+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53089+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53090+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
53091+ return -EPERM;
53092+ }
53093+#endif
53094+ return 0;
53095+}
53096+
53097+int
53098+gr_handle_chroot_pivot(void)
53099+{
53100+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53101+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53102+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53103+ return -EPERM;
53104+ }
53105+#endif
53106+ return 0;
53107+}
53108+
53109+int
53110+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53111+{
53112+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53113+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53114+ !gr_is_outside_chroot(dentry, mnt)) {
53115+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53116+ return -EPERM;
53117+ }
53118+#endif
53119+ return 0;
53120+}
53121+
53122+int
53123+gr_handle_chroot_caps(struct path *path)
53124+{
53125+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53126+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
53127+ (init_task.fs->root.dentry != path->dentry) &&
53128+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
53129+
53130+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53131+ const struct cred *old = current_cred();
53132+ struct cred *new = prepare_creds();
53133+ if (new == NULL)
53134+ return 1;
53135+
53136+ new->cap_permitted = cap_drop(old->cap_permitted,
53137+ chroot_caps);
53138+ new->cap_inheritable = cap_drop(old->cap_inheritable,
53139+ chroot_caps);
53140+ new->cap_effective = cap_drop(old->cap_effective,
53141+ chroot_caps);
53142+
53143+ commit_creds(new);
53144+
53145+ return 0;
53146+ }
53147+#endif
53148+ return 0;
53149+}
53150+
53151+int
53152+gr_handle_chroot_sysctl(const int op)
53153+{
53154+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53155+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
53156+ && (op & MAY_WRITE))
53157+ return -EACCES;
53158+#endif
53159+ return 0;
53160+}
53161+
53162+void
53163+gr_handle_chroot_chdir(struct path *path)
53164+{
53165+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53166+ if (grsec_enable_chroot_chdir)
53167+ set_fs_pwd(current->fs, path);
53168+#endif
53169+ return;
53170+}
53171+
53172+int
53173+gr_handle_chroot_chmod(const struct dentry *dentry,
53174+ const struct vfsmount *mnt, const int mode)
53175+{
53176+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53177+ /* allow chmod +s on directories, but not on files */
53178+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53179+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53180+ proc_is_chrooted(current)) {
53181+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53182+ return -EPERM;
53183+ }
53184+#endif
53185+ return 0;
53186+}
53187+
53188+#ifdef CONFIG_SECURITY
53189+EXPORT_SYMBOL(gr_handle_chroot_caps);
53190+#endif
53191diff -urNp linux-2.6.32.45/grsecurity/grsec_disabled.c linux-2.6.32.45/grsecurity/grsec_disabled.c
53192--- linux-2.6.32.45/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53193+++ linux-2.6.32.45/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
53194@@ -0,0 +1,447 @@
53195+#include <linux/kernel.h>
53196+#include <linux/module.h>
53197+#include <linux/sched.h>
53198+#include <linux/file.h>
53199+#include <linux/fs.h>
53200+#include <linux/kdev_t.h>
53201+#include <linux/net.h>
53202+#include <linux/in.h>
53203+#include <linux/ip.h>
53204+#include <linux/skbuff.h>
53205+#include <linux/sysctl.h>
53206+
53207+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53208+void
53209+pax_set_initial_flags(struct linux_binprm *bprm)
53210+{
53211+ return;
53212+}
53213+#endif
53214+
53215+#ifdef CONFIG_SYSCTL
53216+__u32
53217+gr_handle_sysctl(const struct ctl_table * table, const int op)
53218+{
53219+ return 0;
53220+}
53221+#endif
53222+
53223+#ifdef CONFIG_TASKSTATS
53224+int gr_is_taskstats_denied(int pid)
53225+{
53226+ return 0;
53227+}
53228+#endif
53229+
53230+int
53231+gr_acl_is_enabled(void)
53232+{
53233+ return 0;
53234+}
53235+
53236+int
53237+gr_handle_rawio(const struct inode *inode)
53238+{
53239+ return 0;
53240+}
53241+
53242+void
53243+gr_acl_handle_psacct(struct task_struct *task, const long code)
53244+{
53245+ return;
53246+}
53247+
53248+int
53249+gr_handle_ptrace(struct task_struct *task, const long request)
53250+{
53251+ return 0;
53252+}
53253+
53254+int
53255+gr_handle_proc_ptrace(struct task_struct *task)
53256+{
53257+ return 0;
53258+}
53259+
53260+void
53261+gr_learn_resource(const struct task_struct *task,
53262+ const int res, const unsigned long wanted, const int gt)
53263+{
53264+ return;
53265+}
53266+
53267+int
53268+gr_set_acls(const int type)
53269+{
53270+ return 0;
53271+}
53272+
53273+int
53274+gr_check_hidden_task(const struct task_struct *tsk)
53275+{
53276+ return 0;
53277+}
53278+
53279+int
53280+gr_check_protected_task(const struct task_struct *task)
53281+{
53282+ return 0;
53283+}
53284+
53285+int
53286+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53287+{
53288+ return 0;
53289+}
53290+
53291+void
53292+gr_copy_label(struct task_struct *tsk)
53293+{
53294+ return;
53295+}
53296+
53297+void
53298+gr_set_pax_flags(struct task_struct *task)
53299+{
53300+ return;
53301+}
53302+
53303+int
53304+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53305+ const int unsafe_share)
53306+{
53307+ return 0;
53308+}
53309+
53310+void
53311+gr_handle_delete(const ino_t ino, const dev_t dev)
53312+{
53313+ return;
53314+}
53315+
53316+void
53317+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53318+{
53319+ return;
53320+}
53321+
53322+void
53323+gr_handle_crash(struct task_struct *task, const int sig)
53324+{
53325+ return;
53326+}
53327+
53328+int
53329+gr_check_crash_exec(const struct file *filp)
53330+{
53331+ return 0;
53332+}
53333+
53334+int
53335+gr_check_crash_uid(const uid_t uid)
53336+{
53337+ return 0;
53338+}
53339+
53340+void
53341+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53342+ struct dentry *old_dentry,
53343+ struct dentry *new_dentry,
53344+ struct vfsmount *mnt, const __u8 replace)
53345+{
53346+ return;
53347+}
53348+
53349+int
53350+gr_search_socket(const int family, const int type, const int protocol)
53351+{
53352+ return 1;
53353+}
53354+
53355+int
53356+gr_search_connectbind(const int mode, const struct socket *sock,
53357+ const struct sockaddr_in *addr)
53358+{
53359+ return 0;
53360+}
53361+
53362+int
53363+gr_is_capable(const int cap)
53364+{
53365+ return 1;
53366+}
53367+
53368+int
53369+gr_is_capable_nolog(const int cap)
53370+{
53371+ return 1;
53372+}
53373+
53374+void
53375+gr_handle_alertkill(struct task_struct *task)
53376+{
53377+ return;
53378+}
53379+
53380+__u32
53381+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53382+{
53383+ return 1;
53384+}
53385+
53386+__u32
53387+gr_acl_handle_hidden_file(const struct dentry * dentry,
53388+ const struct vfsmount * mnt)
53389+{
53390+ return 1;
53391+}
53392+
53393+__u32
53394+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53395+ const int fmode)
53396+{
53397+ return 1;
53398+}
53399+
53400+__u32
53401+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53402+{
53403+ return 1;
53404+}
53405+
53406+__u32
53407+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53408+{
53409+ return 1;
53410+}
53411+
53412+int
53413+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53414+ unsigned int *vm_flags)
53415+{
53416+ return 1;
53417+}
53418+
53419+__u32
53420+gr_acl_handle_truncate(const struct dentry * dentry,
53421+ const struct vfsmount * mnt)
53422+{
53423+ return 1;
53424+}
53425+
53426+__u32
53427+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53428+{
53429+ return 1;
53430+}
53431+
53432+__u32
53433+gr_acl_handle_access(const struct dentry * dentry,
53434+ const struct vfsmount * mnt, const int fmode)
53435+{
53436+ return 1;
53437+}
53438+
53439+__u32
53440+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53441+ mode_t mode)
53442+{
53443+ return 1;
53444+}
53445+
53446+__u32
53447+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53448+ mode_t mode)
53449+{
53450+ return 1;
53451+}
53452+
53453+__u32
53454+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53455+{
53456+ return 1;
53457+}
53458+
53459+__u32
53460+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53461+{
53462+ return 1;
53463+}
53464+
53465+void
53466+grsecurity_init(void)
53467+{
53468+ return;
53469+}
53470+
53471+__u32
53472+gr_acl_handle_mknod(const struct dentry * new_dentry,
53473+ const struct dentry * parent_dentry,
53474+ const struct vfsmount * parent_mnt,
53475+ const int mode)
53476+{
53477+ return 1;
53478+}
53479+
53480+__u32
53481+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53482+ const struct dentry * parent_dentry,
53483+ const struct vfsmount * parent_mnt)
53484+{
53485+ return 1;
53486+}
53487+
53488+__u32
53489+gr_acl_handle_symlink(const struct dentry * new_dentry,
53490+ const struct dentry * parent_dentry,
53491+ const struct vfsmount * parent_mnt, const char *from)
53492+{
53493+ return 1;
53494+}
53495+
53496+__u32
53497+gr_acl_handle_link(const struct dentry * new_dentry,
53498+ const struct dentry * parent_dentry,
53499+ const struct vfsmount * parent_mnt,
53500+ const struct dentry * old_dentry,
53501+ const struct vfsmount * old_mnt, const char *to)
53502+{
53503+ return 1;
53504+}
53505+
53506+int
53507+gr_acl_handle_rename(const struct dentry *new_dentry,
53508+ const struct dentry *parent_dentry,
53509+ const struct vfsmount *parent_mnt,
53510+ const struct dentry *old_dentry,
53511+ const struct inode *old_parent_inode,
53512+ const struct vfsmount *old_mnt, const char *newname)
53513+{
53514+ return 0;
53515+}
53516+
53517+int
53518+gr_acl_handle_filldir(const struct file *file, const char *name,
53519+ const int namelen, const ino_t ino)
53520+{
53521+ return 1;
53522+}
53523+
53524+int
53525+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53526+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53527+{
53528+ return 1;
53529+}
53530+
53531+int
53532+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53533+{
53534+ return 0;
53535+}
53536+
53537+int
53538+gr_search_accept(const struct socket *sock)
53539+{
53540+ return 0;
53541+}
53542+
53543+int
53544+gr_search_listen(const struct socket *sock)
53545+{
53546+ return 0;
53547+}
53548+
53549+int
53550+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53551+{
53552+ return 0;
53553+}
53554+
53555+__u32
53556+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53557+{
53558+ return 1;
53559+}
53560+
53561+__u32
53562+gr_acl_handle_creat(const struct dentry * dentry,
53563+ const struct dentry * p_dentry,
53564+ const struct vfsmount * p_mnt, const int fmode,
53565+ const int imode)
53566+{
53567+ return 1;
53568+}
53569+
53570+void
53571+gr_acl_handle_exit(void)
53572+{
53573+ return;
53574+}
53575+
53576+int
53577+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53578+{
53579+ return 1;
53580+}
53581+
53582+void
53583+gr_set_role_label(const uid_t uid, const gid_t gid)
53584+{
53585+ return;
53586+}
53587+
53588+int
53589+gr_acl_handle_procpidmem(const struct task_struct *task)
53590+{
53591+ return 0;
53592+}
53593+
53594+int
53595+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53596+{
53597+ return 0;
53598+}
53599+
53600+int
53601+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53602+{
53603+ return 0;
53604+}
53605+
53606+void
53607+gr_set_kernel_label(struct task_struct *task)
53608+{
53609+ return;
53610+}
53611+
53612+int
53613+gr_check_user_change(int real, int effective, int fs)
53614+{
53615+ return 0;
53616+}
53617+
53618+int
53619+gr_check_group_change(int real, int effective, int fs)
53620+{
53621+ return 0;
53622+}
53623+
53624+int gr_acl_enable_at_secure(void)
53625+{
53626+ return 0;
53627+}
53628+
53629+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53630+{
53631+ return dentry->d_inode->i_sb->s_dev;
53632+}
53633+
53634+EXPORT_SYMBOL(gr_is_capable);
53635+EXPORT_SYMBOL(gr_is_capable_nolog);
53636+EXPORT_SYMBOL(gr_learn_resource);
53637+EXPORT_SYMBOL(gr_set_kernel_label);
53638+#ifdef CONFIG_SECURITY
53639+EXPORT_SYMBOL(gr_check_user_change);
53640+EXPORT_SYMBOL(gr_check_group_change);
53641+#endif
53642diff -urNp linux-2.6.32.45/grsecurity/grsec_exec.c linux-2.6.32.45/grsecurity/grsec_exec.c
53643--- linux-2.6.32.45/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53644+++ linux-2.6.32.45/grsecurity/grsec_exec.c 2011-08-11 19:57:19.000000000 -0400
53645@@ -0,0 +1,132 @@
53646+#include <linux/kernel.h>
53647+#include <linux/sched.h>
53648+#include <linux/file.h>
53649+#include <linux/binfmts.h>
53650+#include <linux/smp_lock.h>
53651+#include <linux/fs.h>
53652+#include <linux/types.h>
53653+#include <linux/grdefs.h>
53654+#include <linux/grinternal.h>
53655+#include <linux/capability.h>
53656+#include <linux/compat.h>
53657+
53658+#include <asm/uaccess.h>
53659+
53660+#ifdef CONFIG_GRKERNSEC_EXECLOG
53661+static char gr_exec_arg_buf[132];
53662+static DEFINE_MUTEX(gr_exec_arg_mutex);
53663+#endif
53664+
53665+void
53666+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53667+{
53668+#ifdef CONFIG_GRKERNSEC_EXECLOG
53669+ char *grarg = gr_exec_arg_buf;
53670+ unsigned int i, x, execlen = 0;
53671+ char c;
53672+
53673+ if (!((grsec_enable_execlog && grsec_enable_group &&
53674+ in_group_p(grsec_audit_gid))
53675+ || (grsec_enable_execlog && !grsec_enable_group)))
53676+ return;
53677+
53678+ mutex_lock(&gr_exec_arg_mutex);
53679+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53680+
53681+ if (unlikely(argv == NULL))
53682+ goto log;
53683+
53684+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53685+ const char __user *p;
53686+ unsigned int len;
53687+
53688+ if (copy_from_user(&p, argv + i, sizeof(p)))
53689+ goto log;
53690+ if (!p)
53691+ goto log;
53692+ len = strnlen_user(p, 128 - execlen);
53693+ if (len > 128 - execlen)
53694+ len = 128 - execlen;
53695+ else if (len > 0)
53696+ len--;
53697+ if (copy_from_user(grarg + execlen, p, len))
53698+ goto log;
53699+
53700+ /* rewrite unprintable characters */
53701+ for (x = 0; x < len; x++) {
53702+ c = *(grarg + execlen + x);
53703+ if (c < 32 || c > 126)
53704+ *(grarg + execlen + x) = ' ';
53705+ }
53706+
53707+ execlen += len;
53708+ *(grarg + execlen) = ' ';
53709+ *(grarg + execlen + 1) = '\0';
53710+ execlen++;
53711+ }
53712+
53713+ log:
53714+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53715+ bprm->file->f_path.mnt, grarg);
53716+ mutex_unlock(&gr_exec_arg_mutex);
53717+#endif
53718+ return;
53719+}
53720+
53721+#ifdef CONFIG_COMPAT
53722+void
53723+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
53724+{
53725+#ifdef CONFIG_GRKERNSEC_EXECLOG
53726+ char *grarg = gr_exec_arg_buf;
53727+ unsigned int i, x, execlen = 0;
53728+ char c;
53729+
53730+ if (!((grsec_enable_execlog && grsec_enable_group &&
53731+ in_group_p(grsec_audit_gid))
53732+ || (grsec_enable_execlog && !grsec_enable_group)))
53733+ return;
53734+
53735+ mutex_lock(&gr_exec_arg_mutex);
53736+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53737+
53738+ if (unlikely(argv == NULL))
53739+ goto log;
53740+
53741+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53742+ compat_uptr_t p;
53743+ unsigned int len;
53744+
53745+ if (get_user(p, argv + i))
53746+ goto log;
53747+ len = strnlen_user(compat_ptr(p), 128 - execlen);
53748+ if (len > 128 - execlen)
53749+ len = 128 - execlen;
53750+ else if (len > 0)
53751+ len--;
53752+ else
53753+ goto log;
53754+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
53755+ goto log;
53756+
53757+ /* rewrite unprintable characters */
53758+ for (x = 0; x < len; x++) {
53759+ c = *(grarg + execlen + x);
53760+ if (c < 32 || c > 126)
53761+ *(grarg + execlen + x) = ' ';
53762+ }
53763+
53764+ execlen += len;
53765+ *(grarg + execlen) = ' ';
53766+ *(grarg + execlen + 1) = '\0';
53767+ execlen++;
53768+ }
53769+
53770+ log:
53771+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
53772+ bprm->file->f_path.mnt, grarg);
53773+ mutex_unlock(&gr_exec_arg_mutex);
53774+#endif
53775+ return;
53776+}
53777+#endif
53778diff -urNp linux-2.6.32.45/grsecurity/grsec_fifo.c linux-2.6.32.45/grsecurity/grsec_fifo.c
53779--- linux-2.6.32.45/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
53780+++ linux-2.6.32.45/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
53781@@ -0,0 +1,24 @@
53782+#include <linux/kernel.h>
53783+#include <linux/sched.h>
53784+#include <linux/fs.h>
53785+#include <linux/file.h>
53786+#include <linux/grinternal.h>
53787+
53788+int
53789+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
53790+ const struct dentry *dir, const int flag, const int acc_mode)
53791+{
53792+#ifdef CONFIG_GRKERNSEC_FIFO
53793+ const struct cred *cred = current_cred();
53794+
53795+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
53796+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
53797+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
53798+ (cred->fsuid != dentry->d_inode->i_uid)) {
53799+ if (!inode_permission(dentry->d_inode, acc_mode))
53800+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
53801+ return -EACCES;
53802+ }
53803+#endif
53804+ return 0;
53805+}
53806diff -urNp linux-2.6.32.45/grsecurity/grsec_fork.c linux-2.6.32.45/grsecurity/grsec_fork.c
53807--- linux-2.6.32.45/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
53808+++ linux-2.6.32.45/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
53809@@ -0,0 +1,23 @@
53810+#include <linux/kernel.h>
53811+#include <linux/sched.h>
53812+#include <linux/grsecurity.h>
53813+#include <linux/grinternal.h>
53814+#include <linux/errno.h>
53815+
53816+void
53817+gr_log_forkfail(const int retval)
53818+{
53819+#ifdef CONFIG_GRKERNSEC_FORKFAIL
53820+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
53821+ switch (retval) {
53822+ case -EAGAIN:
53823+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
53824+ break;
53825+ case -ENOMEM:
53826+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
53827+ break;
53828+ }
53829+ }
53830+#endif
53831+ return;
53832+}
53833diff -urNp linux-2.6.32.45/grsecurity/grsec_init.c linux-2.6.32.45/grsecurity/grsec_init.c
53834--- linux-2.6.32.45/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
53835+++ linux-2.6.32.45/grsecurity/grsec_init.c 2011-08-11 19:57:42.000000000 -0400
53836@@ -0,0 +1,270 @@
53837+#include <linux/kernel.h>
53838+#include <linux/sched.h>
53839+#include <linux/mm.h>
53840+#include <linux/smp_lock.h>
53841+#include <linux/gracl.h>
53842+#include <linux/slab.h>
53843+#include <linux/vmalloc.h>
53844+#include <linux/percpu.h>
53845+#include <linux/module.h>
53846+
53847+int grsec_enable_brute;
53848+int grsec_enable_link;
53849+int grsec_enable_dmesg;
53850+int grsec_enable_harden_ptrace;
53851+int grsec_enable_fifo;
53852+int grsec_enable_execlog;
53853+int grsec_enable_signal;
53854+int grsec_enable_forkfail;
53855+int grsec_enable_audit_ptrace;
53856+int grsec_enable_time;
53857+int grsec_enable_audit_textrel;
53858+int grsec_enable_group;
53859+int grsec_audit_gid;
53860+int grsec_enable_chdir;
53861+int grsec_enable_mount;
53862+int grsec_enable_rofs;
53863+int grsec_enable_chroot_findtask;
53864+int grsec_enable_chroot_mount;
53865+int grsec_enable_chroot_shmat;
53866+int grsec_enable_chroot_fchdir;
53867+int grsec_enable_chroot_double;
53868+int grsec_enable_chroot_pivot;
53869+int grsec_enable_chroot_chdir;
53870+int grsec_enable_chroot_chmod;
53871+int grsec_enable_chroot_mknod;
53872+int grsec_enable_chroot_nice;
53873+int grsec_enable_chroot_execlog;
53874+int grsec_enable_chroot_caps;
53875+int grsec_enable_chroot_sysctl;
53876+int grsec_enable_chroot_unix;
53877+int grsec_enable_tpe;
53878+int grsec_tpe_gid;
53879+int grsec_enable_blackhole;
53880+#ifdef CONFIG_IPV6_MODULE
53881+EXPORT_SYMBOL(grsec_enable_blackhole);
53882+#endif
53883+int grsec_lastack_retries;
53884+int grsec_enable_tpe_all;
53885+int grsec_enable_tpe_invert;
53886+int grsec_enable_socket_all;
53887+int grsec_socket_all_gid;
53888+int grsec_enable_socket_client;
53889+int grsec_socket_client_gid;
53890+int grsec_enable_socket_server;
53891+int grsec_socket_server_gid;
53892+int grsec_resource_logging;
53893+int grsec_disable_privio;
53894+int grsec_enable_log_rwxmaps;
53895+int grsec_lock;
53896+
53897+DEFINE_SPINLOCK(grsec_alert_lock);
53898+unsigned long grsec_alert_wtime = 0;
53899+unsigned long grsec_alert_fyet = 0;
53900+
53901+DEFINE_SPINLOCK(grsec_audit_lock);
53902+
53903+DEFINE_RWLOCK(grsec_exec_file_lock);
53904+
53905+char *gr_shared_page[4];
53906+
53907+char *gr_alert_log_fmt;
53908+char *gr_audit_log_fmt;
53909+char *gr_alert_log_buf;
53910+char *gr_audit_log_buf;
53911+
53912+extern struct gr_arg *gr_usermode;
53913+extern unsigned char *gr_system_salt;
53914+extern unsigned char *gr_system_sum;
53915+
53916+void __init
53917+grsecurity_init(void)
53918+{
53919+ int j;
53920+ /* create the per-cpu shared pages */
53921+
53922+#ifdef CONFIG_X86
53923+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
53924+#endif
53925+
53926+ for (j = 0; j < 4; j++) {
53927+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
53928+ if (gr_shared_page[j] == NULL) {
53929+ panic("Unable to allocate grsecurity shared page");
53930+ return;
53931+ }
53932+ }
53933+
53934+ /* allocate log buffers */
53935+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
53936+ if (!gr_alert_log_fmt) {
53937+ panic("Unable to allocate grsecurity alert log format buffer");
53938+ return;
53939+ }
53940+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
53941+ if (!gr_audit_log_fmt) {
53942+ panic("Unable to allocate grsecurity audit log format buffer");
53943+ return;
53944+ }
53945+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53946+ if (!gr_alert_log_buf) {
53947+ panic("Unable to allocate grsecurity alert log buffer");
53948+ return;
53949+ }
53950+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
53951+ if (!gr_audit_log_buf) {
53952+ panic("Unable to allocate grsecurity audit log buffer");
53953+ return;
53954+ }
53955+
53956+ /* allocate memory for authentication structure */
53957+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
53958+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
53959+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
53960+
53961+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
53962+ panic("Unable to allocate grsecurity authentication structure");
53963+ return;
53964+ }
53965+
53966+
53967+#ifdef CONFIG_GRKERNSEC_IO
53968+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
53969+ grsec_disable_privio = 1;
53970+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53971+ grsec_disable_privio = 1;
53972+#else
53973+ grsec_disable_privio = 0;
53974+#endif
53975+#endif
53976+
53977+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53978+ /* for backward compatibility, tpe_invert always defaults to on if
53979+ enabled in the kernel
53980+ */
53981+ grsec_enable_tpe_invert = 1;
53982+#endif
53983+
53984+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
53985+#ifndef CONFIG_GRKERNSEC_SYSCTL
53986+ grsec_lock = 1;
53987+#endif
53988+
53989+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
53990+ grsec_enable_audit_textrel = 1;
53991+#endif
53992+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53993+ grsec_enable_log_rwxmaps = 1;
53994+#endif
53995+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
53996+ grsec_enable_group = 1;
53997+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
53998+#endif
53999+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54000+ grsec_enable_chdir = 1;
54001+#endif
54002+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54003+ grsec_enable_harden_ptrace = 1;
54004+#endif
54005+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54006+ grsec_enable_mount = 1;
54007+#endif
54008+#ifdef CONFIG_GRKERNSEC_LINK
54009+ grsec_enable_link = 1;
54010+#endif
54011+#ifdef CONFIG_GRKERNSEC_BRUTE
54012+ grsec_enable_brute = 1;
54013+#endif
54014+#ifdef CONFIG_GRKERNSEC_DMESG
54015+ grsec_enable_dmesg = 1;
54016+#endif
54017+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54018+ grsec_enable_blackhole = 1;
54019+ grsec_lastack_retries = 4;
54020+#endif
54021+#ifdef CONFIG_GRKERNSEC_FIFO
54022+ grsec_enable_fifo = 1;
54023+#endif
54024+#ifdef CONFIG_GRKERNSEC_EXECLOG
54025+ grsec_enable_execlog = 1;
54026+#endif
54027+#ifdef CONFIG_GRKERNSEC_SIGNAL
54028+ grsec_enable_signal = 1;
54029+#endif
54030+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54031+ grsec_enable_forkfail = 1;
54032+#endif
54033+#ifdef CONFIG_GRKERNSEC_TIME
54034+ grsec_enable_time = 1;
54035+#endif
54036+#ifdef CONFIG_GRKERNSEC_RESLOG
54037+ grsec_resource_logging = 1;
54038+#endif
54039+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54040+ grsec_enable_chroot_findtask = 1;
54041+#endif
54042+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54043+ grsec_enable_chroot_unix = 1;
54044+#endif
54045+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54046+ grsec_enable_chroot_mount = 1;
54047+#endif
54048+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54049+ grsec_enable_chroot_fchdir = 1;
54050+#endif
54051+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54052+ grsec_enable_chroot_shmat = 1;
54053+#endif
54054+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54055+ grsec_enable_audit_ptrace = 1;
54056+#endif
54057+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54058+ grsec_enable_chroot_double = 1;
54059+#endif
54060+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54061+ grsec_enable_chroot_pivot = 1;
54062+#endif
54063+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54064+ grsec_enable_chroot_chdir = 1;
54065+#endif
54066+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54067+ grsec_enable_chroot_chmod = 1;
54068+#endif
54069+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54070+ grsec_enable_chroot_mknod = 1;
54071+#endif
54072+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54073+ grsec_enable_chroot_nice = 1;
54074+#endif
54075+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54076+ grsec_enable_chroot_execlog = 1;
54077+#endif
54078+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54079+ grsec_enable_chroot_caps = 1;
54080+#endif
54081+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54082+ grsec_enable_chroot_sysctl = 1;
54083+#endif
54084+#ifdef CONFIG_GRKERNSEC_TPE
54085+ grsec_enable_tpe = 1;
54086+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54087+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54088+ grsec_enable_tpe_all = 1;
54089+#endif
54090+#endif
54091+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54092+ grsec_enable_socket_all = 1;
54093+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54094+#endif
54095+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54096+ grsec_enable_socket_client = 1;
54097+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54098+#endif
54099+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54100+ grsec_enable_socket_server = 1;
54101+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54102+#endif
54103+#endif
54104+
54105+ return;
54106+}
54107diff -urNp linux-2.6.32.45/grsecurity/grsec_link.c linux-2.6.32.45/grsecurity/grsec_link.c
54108--- linux-2.6.32.45/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54109+++ linux-2.6.32.45/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
54110@@ -0,0 +1,43 @@
54111+#include <linux/kernel.h>
54112+#include <linux/sched.h>
54113+#include <linux/fs.h>
54114+#include <linux/file.h>
54115+#include <linux/grinternal.h>
54116+
54117+int
54118+gr_handle_follow_link(const struct inode *parent,
54119+ const struct inode *inode,
54120+ const struct dentry *dentry, const struct vfsmount *mnt)
54121+{
54122+#ifdef CONFIG_GRKERNSEC_LINK
54123+ const struct cred *cred = current_cred();
54124+
54125+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54126+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54127+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54128+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54129+ return -EACCES;
54130+ }
54131+#endif
54132+ return 0;
54133+}
54134+
54135+int
54136+gr_handle_hardlink(const struct dentry *dentry,
54137+ const struct vfsmount *mnt,
54138+ struct inode *inode, const int mode, const char *to)
54139+{
54140+#ifdef CONFIG_GRKERNSEC_LINK
54141+ const struct cred *cred = current_cred();
54142+
54143+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54144+ (!S_ISREG(mode) || (mode & S_ISUID) ||
54145+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54146+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54147+ !capable(CAP_FOWNER) && cred->uid) {
54148+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54149+ return -EPERM;
54150+ }
54151+#endif
54152+ return 0;
54153+}
54154diff -urNp linux-2.6.32.45/grsecurity/grsec_log.c linux-2.6.32.45/grsecurity/grsec_log.c
54155--- linux-2.6.32.45/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54156+++ linux-2.6.32.45/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
54157@@ -0,0 +1,310 @@
54158+#include <linux/kernel.h>
54159+#include <linux/sched.h>
54160+#include <linux/file.h>
54161+#include <linux/tty.h>
54162+#include <linux/fs.h>
54163+#include <linux/grinternal.h>
54164+
54165+#ifdef CONFIG_TREE_PREEMPT_RCU
54166+#define DISABLE_PREEMPT() preempt_disable()
54167+#define ENABLE_PREEMPT() preempt_enable()
54168+#else
54169+#define DISABLE_PREEMPT()
54170+#define ENABLE_PREEMPT()
54171+#endif
54172+
54173+#define BEGIN_LOCKS(x) \
54174+ DISABLE_PREEMPT(); \
54175+ rcu_read_lock(); \
54176+ read_lock(&tasklist_lock); \
54177+ read_lock(&grsec_exec_file_lock); \
54178+ if (x != GR_DO_AUDIT) \
54179+ spin_lock(&grsec_alert_lock); \
54180+ else \
54181+ spin_lock(&grsec_audit_lock)
54182+
54183+#define END_LOCKS(x) \
54184+ if (x != GR_DO_AUDIT) \
54185+ spin_unlock(&grsec_alert_lock); \
54186+ else \
54187+ spin_unlock(&grsec_audit_lock); \
54188+ read_unlock(&grsec_exec_file_lock); \
54189+ read_unlock(&tasklist_lock); \
54190+ rcu_read_unlock(); \
54191+ ENABLE_PREEMPT(); \
54192+ if (x == GR_DONT_AUDIT) \
54193+ gr_handle_alertkill(current)
54194+
54195+enum {
54196+ FLOODING,
54197+ NO_FLOODING
54198+};
54199+
54200+extern char *gr_alert_log_fmt;
54201+extern char *gr_audit_log_fmt;
54202+extern char *gr_alert_log_buf;
54203+extern char *gr_audit_log_buf;
54204+
54205+static int gr_log_start(int audit)
54206+{
54207+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54208+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54209+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54210+
54211+ if (audit == GR_DO_AUDIT)
54212+ goto set_fmt;
54213+
54214+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54215+ grsec_alert_wtime = jiffies;
54216+ grsec_alert_fyet = 0;
54217+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54218+ grsec_alert_fyet++;
54219+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54220+ grsec_alert_wtime = jiffies;
54221+ grsec_alert_fyet++;
54222+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54223+ return FLOODING;
54224+ } else return FLOODING;
54225+
54226+set_fmt:
54227+ memset(buf, 0, PAGE_SIZE);
54228+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54229+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54230+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54231+ } else if (current->signal->curr_ip) {
54232+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54233+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54234+ } else if (gr_acl_is_enabled()) {
54235+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54236+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54237+ } else {
54238+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54239+ strcpy(buf, fmt);
54240+ }
54241+
54242+ return NO_FLOODING;
54243+}
54244+
54245+static void gr_log_middle(int audit, const char *msg, va_list ap)
54246+ __attribute__ ((format (printf, 2, 0)));
54247+
54248+static void gr_log_middle(int audit, const char *msg, va_list ap)
54249+{
54250+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54251+ unsigned int len = strlen(buf);
54252+
54253+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54254+
54255+ return;
54256+}
54257+
54258+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54259+ __attribute__ ((format (printf, 2, 3)));
54260+
54261+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54262+{
54263+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54264+ unsigned int len = strlen(buf);
54265+ va_list ap;
54266+
54267+ va_start(ap, msg);
54268+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54269+ va_end(ap);
54270+
54271+ return;
54272+}
54273+
54274+static void gr_log_end(int audit)
54275+{
54276+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54277+ unsigned int len = strlen(buf);
54278+
54279+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54280+ printk("%s\n", buf);
54281+
54282+ return;
54283+}
54284+
54285+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54286+{
54287+ int logtype;
54288+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54289+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54290+ void *voidptr = NULL;
54291+ int num1 = 0, num2 = 0;
54292+ unsigned long ulong1 = 0, ulong2 = 0;
54293+ struct dentry *dentry = NULL;
54294+ struct vfsmount *mnt = NULL;
54295+ struct file *file = NULL;
54296+ struct task_struct *task = NULL;
54297+ const struct cred *cred, *pcred;
54298+ va_list ap;
54299+
54300+ BEGIN_LOCKS(audit);
54301+ logtype = gr_log_start(audit);
54302+ if (logtype == FLOODING) {
54303+ END_LOCKS(audit);
54304+ return;
54305+ }
54306+ va_start(ap, argtypes);
54307+ switch (argtypes) {
54308+ case GR_TTYSNIFF:
54309+ task = va_arg(ap, struct task_struct *);
54310+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54311+ break;
54312+ case GR_SYSCTL_HIDDEN:
54313+ str1 = va_arg(ap, char *);
54314+ gr_log_middle_varargs(audit, msg, result, str1);
54315+ break;
54316+ case GR_RBAC:
54317+ dentry = va_arg(ap, struct dentry *);
54318+ mnt = va_arg(ap, struct vfsmount *);
54319+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54320+ break;
54321+ case GR_RBAC_STR:
54322+ dentry = va_arg(ap, struct dentry *);
54323+ mnt = va_arg(ap, struct vfsmount *);
54324+ str1 = va_arg(ap, char *);
54325+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54326+ break;
54327+ case GR_STR_RBAC:
54328+ str1 = va_arg(ap, char *);
54329+ dentry = va_arg(ap, struct dentry *);
54330+ mnt = va_arg(ap, struct vfsmount *);
54331+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54332+ break;
54333+ case GR_RBAC_MODE2:
54334+ dentry = va_arg(ap, struct dentry *);
54335+ mnt = va_arg(ap, struct vfsmount *);
54336+ str1 = va_arg(ap, char *);
54337+ str2 = va_arg(ap, char *);
54338+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54339+ break;
54340+ case GR_RBAC_MODE3:
54341+ dentry = va_arg(ap, struct dentry *);
54342+ mnt = va_arg(ap, struct vfsmount *);
54343+ str1 = va_arg(ap, char *);
54344+ str2 = va_arg(ap, char *);
54345+ str3 = va_arg(ap, char *);
54346+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54347+ break;
54348+ case GR_FILENAME:
54349+ dentry = va_arg(ap, struct dentry *);
54350+ mnt = va_arg(ap, struct vfsmount *);
54351+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54352+ break;
54353+ case GR_STR_FILENAME:
54354+ str1 = va_arg(ap, char *);
54355+ dentry = va_arg(ap, struct dentry *);
54356+ mnt = va_arg(ap, struct vfsmount *);
54357+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54358+ break;
54359+ case GR_FILENAME_STR:
54360+ dentry = va_arg(ap, struct dentry *);
54361+ mnt = va_arg(ap, struct vfsmount *);
54362+ str1 = va_arg(ap, char *);
54363+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54364+ break;
54365+ case GR_FILENAME_TWO_INT:
54366+ dentry = va_arg(ap, struct dentry *);
54367+ mnt = va_arg(ap, struct vfsmount *);
54368+ num1 = va_arg(ap, int);
54369+ num2 = va_arg(ap, int);
54370+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54371+ break;
54372+ case GR_FILENAME_TWO_INT_STR:
54373+ dentry = va_arg(ap, struct dentry *);
54374+ mnt = va_arg(ap, struct vfsmount *);
54375+ num1 = va_arg(ap, int);
54376+ num2 = va_arg(ap, int);
54377+ str1 = va_arg(ap, char *);
54378+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54379+ break;
54380+ case GR_TEXTREL:
54381+ file = va_arg(ap, struct file *);
54382+ ulong1 = va_arg(ap, unsigned long);
54383+ ulong2 = va_arg(ap, unsigned long);
54384+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54385+ break;
54386+ case GR_PTRACE:
54387+ task = va_arg(ap, struct task_struct *);
54388+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54389+ break;
54390+ case GR_RESOURCE:
54391+ task = va_arg(ap, struct task_struct *);
54392+ cred = __task_cred(task);
54393+ pcred = __task_cred(task->real_parent);
54394+ ulong1 = va_arg(ap, unsigned long);
54395+ str1 = va_arg(ap, char *);
54396+ ulong2 = va_arg(ap, unsigned long);
54397+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54398+ break;
54399+ case GR_CAP:
54400+ task = va_arg(ap, struct task_struct *);
54401+ cred = __task_cred(task);
54402+ pcred = __task_cred(task->real_parent);
54403+ str1 = va_arg(ap, char *);
54404+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54405+ break;
54406+ case GR_SIG:
54407+ str1 = va_arg(ap, char *);
54408+ voidptr = va_arg(ap, void *);
54409+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54410+ break;
54411+ case GR_SIG2:
54412+ task = va_arg(ap, struct task_struct *);
54413+ cred = __task_cred(task);
54414+ pcred = __task_cred(task->real_parent);
54415+ num1 = va_arg(ap, int);
54416+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54417+ break;
54418+ case GR_CRASH1:
54419+ task = va_arg(ap, struct task_struct *);
54420+ cred = __task_cred(task);
54421+ pcred = __task_cred(task->real_parent);
54422+ ulong1 = va_arg(ap, unsigned long);
54423+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54424+ break;
54425+ case GR_CRASH2:
54426+ task = va_arg(ap, struct task_struct *);
54427+ cred = __task_cred(task);
54428+ pcred = __task_cred(task->real_parent);
54429+ ulong1 = va_arg(ap, unsigned long);
54430+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54431+ break;
54432+ case GR_RWXMAP:
54433+ file = va_arg(ap, struct file *);
54434+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54435+ break;
54436+ case GR_PSACCT:
54437+ {
54438+ unsigned int wday, cday;
54439+ __u8 whr, chr;
54440+ __u8 wmin, cmin;
54441+ __u8 wsec, csec;
54442+ char cur_tty[64] = { 0 };
54443+ char parent_tty[64] = { 0 };
54444+
54445+ task = va_arg(ap, struct task_struct *);
54446+ wday = va_arg(ap, unsigned int);
54447+ cday = va_arg(ap, unsigned int);
54448+ whr = va_arg(ap, int);
54449+ chr = va_arg(ap, int);
54450+ wmin = va_arg(ap, int);
54451+ cmin = va_arg(ap, int);
54452+ wsec = va_arg(ap, int);
54453+ csec = va_arg(ap, int);
54454+ ulong1 = va_arg(ap, unsigned long);
54455+ cred = __task_cred(task);
54456+ pcred = __task_cred(task->real_parent);
54457+
54458+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54459+ }
54460+ break;
54461+ default:
54462+ gr_log_middle(audit, msg, ap);
54463+ }
54464+ va_end(ap);
54465+ gr_log_end(audit);
54466+ END_LOCKS(audit);
54467+}
54468diff -urNp linux-2.6.32.45/grsecurity/grsec_mem.c linux-2.6.32.45/grsecurity/grsec_mem.c
54469--- linux-2.6.32.45/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54470+++ linux-2.6.32.45/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54471@@ -0,0 +1,33 @@
54472+#include <linux/kernel.h>
54473+#include <linux/sched.h>
54474+#include <linux/mm.h>
54475+#include <linux/mman.h>
54476+#include <linux/grinternal.h>
54477+
54478+void
54479+gr_handle_ioperm(void)
54480+{
54481+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54482+ return;
54483+}
54484+
54485+void
54486+gr_handle_iopl(void)
54487+{
54488+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54489+ return;
54490+}
54491+
54492+void
54493+gr_handle_mem_readwrite(u64 from, u64 to)
54494+{
54495+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54496+ return;
54497+}
54498+
54499+void
54500+gr_handle_vm86(void)
54501+{
54502+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54503+ return;
54504+}
54505diff -urNp linux-2.6.32.45/grsecurity/grsec_mount.c linux-2.6.32.45/grsecurity/grsec_mount.c
54506--- linux-2.6.32.45/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54507+++ linux-2.6.32.45/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54508@@ -0,0 +1,62 @@
54509+#include <linux/kernel.h>
54510+#include <linux/sched.h>
54511+#include <linux/mount.h>
54512+#include <linux/grsecurity.h>
54513+#include <linux/grinternal.h>
54514+
54515+void
54516+gr_log_remount(const char *devname, const int retval)
54517+{
54518+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54519+ if (grsec_enable_mount && (retval >= 0))
54520+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54521+#endif
54522+ return;
54523+}
54524+
54525+void
54526+gr_log_unmount(const char *devname, const int retval)
54527+{
54528+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54529+ if (grsec_enable_mount && (retval >= 0))
54530+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54531+#endif
54532+ return;
54533+}
54534+
54535+void
54536+gr_log_mount(const char *from, const char *to, const int retval)
54537+{
54538+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54539+ if (grsec_enable_mount && (retval >= 0))
54540+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54541+#endif
54542+ return;
54543+}
54544+
54545+int
54546+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54547+{
54548+#ifdef CONFIG_GRKERNSEC_ROFS
54549+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54550+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54551+ return -EPERM;
54552+ } else
54553+ return 0;
54554+#endif
54555+ return 0;
54556+}
54557+
54558+int
54559+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54560+{
54561+#ifdef CONFIG_GRKERNSEC_ROFS
54562+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54563+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54564+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54565+ return -EPERM;
54566+ } else
54567+ return 0;
54568+#endif
54569+ return 0;
54570+}
54571diff -urNp linux-2.6.32.45/grsecurity/grsec_pax.c linux-2.6.32.45/grsecurity/grsec_pax.c
54572--- linux-2.6.32.45/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54573+++ linux-2.6.32.45/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54574@@ -0,0 +1,36 @@
54575+#include <linux/kernel.h>
54576+#include <linux/sched.h>
54577+#include <linux/mm.h>
54578+#include <linux/file.h>
54579+#include <linux/grinternal.h>
54580+#include <linux/grsecurity.h>
54581+
54582+void
54583+gr_log_textrel(struct vm_area_struct * vma)
54584+{
54585+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54586+ if (grsec_enable_audit_textrel)
54587+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54588+#endif
54589+ return;
54590+}
54591+
54592+void
54593+gr_log_rwxmmap(struct file *file)
54594+{
54595+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54596+ if (grsec_enable_log_rwxmaps)
54597+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54598+#endif
54599+ return;
54600+}
54601+
54602+void
54603+gr_log_rwxmprotect(struct file *file)
54604+{
54605+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54606+ if (grsec_enable_log_rwxmaps)
54607+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54608+#endif
54609+ return;
54610+}
54611diff -urNp linux-2.6.32.45/grsecurity/grsec_ptrace.c linux-2.6.32.45/grsecurity/grsec_ptrace.c
54612--- linux-2.6.32.45/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54613+++ linux-2.6.32.45/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54614@@ -0,0 +1,14 @@
54615+#include <linux/kernel.h>
54616+#include <linux/sched.h>
54617+#include <linux/grinternal.h>
54618+#include <linux/grsecurity.h>
54619+
54620+void
54621+gr_audit_ptrace(struct task_struct *task)
54622+{
54623+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54624+ if (grsec_enable_audit_ptrace)
54625+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54626+#endif
54627+ return;
54628+}
54629diff -urNp linux-2.6.32.45/grsecurity/grsec_sig.c linux-2.6.32.45/grsecurity/grsec_sig.c
54630--- linux-2.6.32.45/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54631+++ linux-2.6.32.45/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54632@@ -0,0 +1,205 @@
54633+#include <linux/kernel.h>
54634+#include <linux/sched.h>
54635+#include <linux/delay.h>
54636+#include <linux/grsecurity.h>
54637+#include <linux/grinternal.h>
54638+#include <linux/hardirq.h>
54639+
54640+char *signames[] = {
54641+ [SIGSEGV] = "Segmentation fault",
54642+ [SIGILL] = "Illegal instruction",
54643+ [SIGABRT] = "Abort",
54644+ [SIGBUS] = "Invalid alignment/Bus error"
54645+};
54646+
54647+void
54648+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54649+{
54650+#ifdef CONFIG_GRKERNSEC_SIGNAL
54651+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54652+ (sig == SIGABRT) || (sig == SIGBUS))) {
54653+ if (t->pid == current->pid) {
54654+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54655+ } else {
54656+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54657+ }
54658+ }
54659+#endif
54660+ return;
54661+}
54662+
54663+int
54664+gr_handle_signal(const struct task_struct *p, const int sig)
54665+{
54666+#ifdef CONFIG_GRKERNSEC
54667+ if (current->pid > 1 && gr_check_protected_task(p)) {
54668+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54669+ return -EPERM;
54670+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54671+ return -EPERM;
54672+ }
54673+#endif
54674+ return 0;
54675+}
54676+
54677+#ifdef CONFIG_GRKERNSEC
54678+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54679+
54680+int gr_fake_force_sig(int sig, struct task_struct *t)
54681+{
54682+ unsigned long int flags;
54683+ int ret, blocked, ignored;
54684+ struct k_sigaction *action;
54685+
54686+ spin_lock_irqsave(&t->sighand->siglock, flags);
54687+ action = &t->sighand->action[sig-1];
54688+ ignored = action->sa.sa_handler == SIG_IGN;
54689+ blocked = sigismember(&t->blocked, sig);
54690+ if (blocked || ignored) {
54691+ action->sa.sa_handler = SIG_DFL;
54692+ if (blocked) {
54693+ sigdelset(&t->blocked, sig);
54694+ recalc_sigpending_and_wake(t);
54695+ }
54696+ }
54697+ if (action->sa.sa_handler == SIG_DFL)
54698+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
54699+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
54700+
54701+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
54702+
54703+ return ret;
54704+}
54705+#endif
54706+
54707+#ifdef CONFIG_GRKERNSEC_BRUTE
54708+#define GR_USER_BAN_TIME (15 * 60)
54709+
54710+static int __get_dumpable(unsigned long mm_flags)
54711+{
54712+ int ret;
54713+
54714+ ret = mm_flags & MMF_DUMPABLE_MASK;
54715+ return (ret >= 2) ? 2 : ret;
54716+}
54717+#endif
54718+
54719+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
54720+{
54721+#ifdef CONFIG_GRKERNSEC_BRUTE
54722+ uid_t uid = 0;
54723+
54724+ if (!grsec_enable_brute)
54725+ return;
54726+
54727+ rcu_read_lock();
54728+ read_lock(&tasklist_lock);
54729+ read_lock(&grsec_exec_file_lock);
54730+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
54731+ p->real_parent->brute = 1;
54732+ else {
54733+ const struct cred *cred = __task_cred(p), *cred2;
54734+ struct task_struct *tsk, *tsk2;
54735+
54736+ if (!__get_dumpable(mm_flags) && cred->uid) {
54737+ struct user_struct *user;
54738+
54739+ uid = cred->uid;
54740+
54741+ /* this is put upon execution past expiration */
54742+ user = find_user(uid);
54743+ if (user == NULL)
54744+ goto unlock;
54745+ user->banned = 1;
54746+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
54747+ if (user->ban_expires == ~0UL)
54748+ user->ban_expires--;
54749+
54750+ do_each_thread(tsk2, tsk) {
54751+ cred2 = __task_cred(tsk);
54752+ if (tsk != p && cred2->uid == uid)
54753+ gr_fake_force_sig(SIGKILL, tsk);
54754+ } while_each_thread(tsk2, tsk);
54755+ }
54756+ }
54757+unlock:
54758+ read_unlock(&grsec_exec_file_lock);
54759+ read_unlock(&tasklist_lock);
54760+ rcu_read_unlock();
54761+
54762+ if (uid)
54763+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
54764+#endif
54765+ return;
54766+}
54767+
54768+void gr_handle_brute_check(void)
54769+{
54770+#ifdef CONFIG_GRKERNSEC_BRUTE
54771+ if (current->brute)
54772+ msleep(30 * 1000);
54773+#endif
54774+ return;
54775+}
54776+
54777+void gr_handle_kernel_exploit(void)
54778+{
54779+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
54780+ const struct cred *cred;
54781+ struct task_struct *tsk, *tsk2;
54782+ struct user_struct *user;
54783+ uid_t uid;
54784+
54785+ if (in_irq() || in_serving_softirq() || in_nmi())
54786+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
54787+
54788+ uid = current_uid();
54789+
54790+ if (uid == 0)
54791+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
54792+ else {
54793+ /* kill all the processes of this user, hold a reference
54794+ to their creds struct, and prevent them from creating
54795+ another process until system reset
54796+ */
54797+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
54798+ /* we intentionally leak this ref */
54799+ user = get_uid(current->cred->user);
54800+ if (user) {
54801+ user->banned = 1;
54802+ user->ban_expires = ~0UL;
54803+ }
54804+
54805+ read_lock(&tasklist_lock);
54806+ do_each_thread(tsk2, tsk) {
54807+ cred = __task_cred(tsk);
54808+ if (cred->uid == uid)
54809+ gr_fake_force_sig(SIGKILL, tsk);
54810+ } while_each_thread(tsk2, tsk);
54811+ read_unlock(&tasklist_lock);
54812+ }
54813+#endif
54814+}
54815+
54816+int __gr_process_user_ban(struct user_struct *user)
54817+{
54818+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54819+ if (unlikely(user->banned)) {
54820+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
54821+ user->banned = 0;
54822+ user->ban_expires = 0;
54823+ free_uid(user);
54824+ } else
54825+ return -EPERM;
54826+ }
54827+#endif
54828+ return 0;
54829+}
54830+
54831+int gr_process_user_ban(void)
54832+{
54833+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54834+ return __gr_process_user_ban(current->cred->user);
54835+#endif
54836+ return 0;
54837+}
54838diff -urNp linux-2.6.32.45/grsecurity/grsec_sock.c linux-2.6.32.45/grsecurity/grsec_sock.c
54839--- linux-2.6.32.45/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
54840+++ linux-2.6.32.45/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
54841@@ -0,0 +1,275 @@
54842+#include <linux/kernel.h>
54843+#include <linux/module.h>
54844+#include <linux/sched.h>
54845+#include <linux/file.h>
54846+#include <linux/net.h>
54847+#include <linux/in.h>
54848+#include <linux/ip.h>
54849+#include <net/sock.h>
54850+#include <net/inet_sock.h>
54851+#include <linux/grsecurity.h>
54852+#include <linux/grinternal.h>
54853+#include <linux/gracl.h>
54854+
54855+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
54856+EXPORT_SYMBOL(gr_cap_rtnetlink);
54857+
54858+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
54859+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
54860+
54861+EXPORT_SYMBOL(gr_search_udp_recvmsg);
54862+EXPORT_SYMBOL(gr_search_udp_sendmsg);
54863+
54864+#ifdef CONFIG_UNIX_MODULE
54865+EXPORT_SYMBOL(gr_acl_handle_unix);
54866+EXPORT_SYMBOL(gr_acl_handle_mknod);
54867+EXPORT_SYMBOL(gr_handle_chroot_unix);
54868+EXPORT_SYMBOL(gr_handle_create);
54869+#endif
54870+
54871+#ifdef CONFIG_GRKERNSEC
54872+#define gr_conn_table_size 32749
54873+struct conn_table_entry {
54874+ struct conn_table_entry *next;
54875+ struct signal_struct *sig;
54876+};
54877+
54878+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
54879+DEFINE_SPINLOCK(gr_conn_table_lock);
54880+
54881+extern const char * gr_socktype_to_name(unsigned char type);
54882+extern const char * gr_proto_to_name(unsigned char proto);
54883+extern const char * gr_sockfamily_to_name(unsigned char family);
54884+
54885+static __inline__ int
54886+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
54887+{
54888+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
54889+}
54890+
54891+static __inline__ int
54892+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
54893+ __u16 sport, __u16 dport)
54894+{
54895+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
54896+ sig->gr_sport == sport && sig->gr_dport == dport))
54897+ return 1;
54898+ else
54899+ return 0;
54900+}
54901+
54902+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
54903+{
54904+ struct conn_table_entry **match;
54905+ unsigned int index;
54906+
54907+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54908+ sig->gr_sport, sig->gr_dport,
54909+ gr_conn_table_size);
54910+
54911+ newent->sig = sig;
54912+
54913+ match = &gr_conn_table[index];
54914+ newent->next = *match;
54915+ *match = newent;
54916+
54917+ return;
54918+}
54919+
54920+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
54921+{
54922+ struct conn_table_entry *match, *last = NULL;
54923+ unsigned int index;
54924+
54925+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
54926+ sig->gr_sport, sig->gr_dport,
54927+ gr_conn_table_size);
54928+
54929+ match = gr_conn_table[index];
54930+ while (match && !conn_match(match->sig,
54931+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
54932+ sig->gr_dport)) {
54933+ last = match;
54934+ match = match->next;
54935+ }
54936+
54937+ if (match) {
54938+ if (last)
54939+ last->next = match->next;
54940+ else
54941+ gr_conn_table[index] = NULL;
54942+ kfree(match);
54943+ }
54944+
54945+ return;
54946+}
54947+
54948+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
54949+ __u16 sport, __u16 dport)
54950+{
54951+ struct conn_table_entry *match;
54952+ unsigned int index;
54953+
54954+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
54955+
54956+ match = gr_conn_table[index];
54957+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
54958+ match = match->next;
54959+
54960+ if (match)
54961+ return match->sig;
54962+ else
54963+ return NULL;
54964+}
54965+
54966+#endif
54967+
54968+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
54969+{
54970+#ifdef CONFIG_GRKERNSEC
54971+ struct signal_struct *sig = task->signal;
54972+ struct conn_table_entry *newent;
54973+
54974+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
54975+ if (newent == NULL)
54976+ return;
54977+ /* no bh lock needed since we are called with bh disabled */
54978+ spin_lock(&gr_conn_table_lock);
54979+ gr_del_task_from_ip_table_nolock(sig);
54980+ sig->gr_saddr = inet->rcv_saddr;
54981+ sig->gr_daddr = inet->daddr;
54982+ sig->gr_sport = inet->sport;
54983+ sig->gr_dport = inet->dport;
54984+ gr_add_to_task_ip_table_nolock(sig, newent);
54985+ spin_unlock(&gr_conn_table_lock);
54986+#endif
54987+ return;
54988+}
54989+
54990+void gr_del_task_from_ip_table(struct task_struct *task)
54991+{
54992+#ifdef CONFIG_GRKERNSEC
54993+ spin_lock_bh(&gr_conn_table_lock);
54994+ gr_del_task_from_ip_table_nolock(task->signal);
54995+ spin_unlock_bh(&gr_conn_table_lock);
54996+#endif
54997+ return;
54998+}
54999+
55000+void
55001+gr_attach_curr_ip(const struct sock *sk)
55002+{
55003+#ifdef CONFIG_GRKERNSEC
55004+ struct signal_struct *p, *set;
55005+ const struct inet_sock *inet = inet_sk(sk);
55006+
55007+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55008+ return;
55009+
55010+ set = current->signal;
55011+
55012+ spin_lock_bh(&gr_conn_table_lock);
55013+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
55014+ inet->dport, inet->sport);
55015+ if (unlikely(p != NULL)) {
55016+ set->curr_ip = p->curr_ip;
55017+ set->used_accept = 1;
55018+ gr_del_task_from_ip_table_nolock(p);
55019+ spin_unlock_bh(&gr_conn_table_lock);
55020+ return;
55021+ }
55022+ spin_unlock_bh(&gr_conn_table_lock);
55023+
55024+ set->curr_ip = inet->daddr;
55025+ set->used_accept = 1;
55026+#endif
55027+ return;
55028+}
55029+
55030+int
55031+gr_handle_sock_all(const int family, const int type, const int protocol)
55032+{
55033+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55034+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55035+ (family != AF_UNIX)) {
55036+ if (family == AF_INET)
55037+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55038+ else
55039+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55040+ return -EACCES;
55041+ }
55042+#endif
55043+ return 0;
55044+}
55045+
55046+int
55047+gr_handle_sock_server(const struct sockaddr *sck)
55048+{
55049+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55050+ if (grsec_enable_socket_server &&
55051+ in_group_p(grsec_socket_server_gid) &&
55052+ sck && (sck->sa_family != AF_UNIX) &&
55053+ (sck->sa_family != AF_LOCAL)) {
55054+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55055+ return -EACCES;
55056+ }
55057+#endif
55058+ return 0;
55059+}
55060+
55061+int
55062+gr_handle_sock_server_other(const struct sock *sck)
55063+{
55064+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55065+ if (grsec_enable_socket_server &&
55066+ in_group_p(grsec_socket_server_gid) &&
55067+ sck && (sck->sk_family != AF_UNIX) &&
55068+ (sck->sk_family != AF_LOCAL)) {
55069+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55070+ return -EACCES;
55071+ }
55072+#endif
55073+ return 0;
55074+}
55075+
55076+int
55077+gr_handle_sock_client(const struct sockaddr *sck)
55078+{
55079+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55080+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55081+ sck && (sck->sa_family != AF_UNIX) &&
55082+ (sck->sa_family != AF_LOCAL)) {
55083+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55084+ return -EACCES;
55085+ }
55086+#endif
55087+ return 0;
55088+}
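
The three gr_handle_sock_* hooks above share one pattern: if the corresponding feature is enabled and the calling task belongs to the configured GID, any non-AF_UNIX socket operation is logged and refused with -EACCES. A minimal standalone sketch of that predicate (the boolean arguments stand in for the grsec_enable_socket_* flags and the in_group_p() test, which are kernel-side state):

#include <errno.h>
#include <stdbool.h>
#include <sys/socket.h>

/* condensed model of the gr_handle_sock_all/server/client checks above */
static int sock_gate(bool feature_enabled, bool caller_in_gid, int family)
{
	if (feature_enabled && caller_in_gid &&
	    family != AF_UNIX && family != AF_LOCAL)
		return -EACCES;	/* the real hooks also log the refusal via gr_log_* */
	return 0;
}

int main(void)
{
	/* a covered caller opening an AF_INET socket is refused */
	return sock_gate(true, true, AF_INET) == -EACCES ? 0 : 1;
}
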
55089+
55090+kernel_cap_t
55091+gr_cap_rtnetlink(struct sock *sock)
55092+{
55093+#ifdef CONFIG_GRKERNSEC
55094+ if (!gr_acl_is_enabled())
55095+ return current_cap();
55096+ else if (sock->sk_protocol == NETLINK_ISCSI &&
55097+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
55098+ gr_is_capable(CAP_SYS_ADMIN))
55099+ return current_cap();
55100+ else if (sock->sk_protocol == NETLINK_AUDIT &&
55101+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
55102+ gr_is_capable(CAP_AUDIT_WRITE) &&
55103+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
55104+ gr_is_capable(CAP_AUDIT_CONTROL))
55105+ return current_cap();
55106+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
55107+ ((sock->sk_protocol == NETLINK_ROUTE) ?
55108+ gr_is_capable_nolog(CAP_NET_ADMIN) :
55109+ gr_is_capable(CAP_NET_ADMIN)))
55110+ return current_cap();
55111+ else
55112+ return __cap_empty_set;
55113+#else
55114+ return current_cap();
55115+#endif
55116+}
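
The connection-tracking code earlier in this file is a small chained hash table keyed by the (saddr, daddr, sport, dport) tuple: entries are pushed onto the head of a bucket and lookups walk the chain until the tuple matches. A user-space sketch of the same structure, with a placeholder hash function and table size rather than the ones grsecurity uses:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 1024		/* placeholder; gr_conn_table_size is set elsewhere */

struct conn_entry {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct conn_entry *next;
};

static struct conn_entry *table[TABLE_SIZE];

/* placeholder mix; the kernel code uses its own conn_hash() */
static unsigned int hash4(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	return (s ^ d ^ ((uint32_t)sp << 16) ^ dp) % TABLE_SIZE;
}

/* insert at the head of the bucket, like gr_add_to_task_ip_table_nolock() */
static void conn_add(struct conn_entry *e)
{
	unsigned int i = hash4(e->saddr, e->daddr, e->sport, e->dport);

	e->next = table[i];
	table[i] = e;
}

/* walk the chain until the 4-tuple matches, like gr_lookup_task_ip_table() */
static struct conn_entry *conn_lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct conn_entry *e = table[hash4(s, d, sp, dp)];

	while (e && !(e->saddr == s && e->daddr == d &&
		      e->sport == sp && e->dport == dp))
		e = e->next;
	return e;	/* NULL when the tuple is not in the table */
}

int main(void)
{
	struct conn_entry *e = calloc(1, sizeof(*e));

	e->saddr = 1; e->daddr = 2; e->sport = 3; e->dport = 4;
	conn_add(e);
	printf("%s\n", conn_lookup(1, 2, 3, 4) ? "found" : "missing");
	free(e);
	return 0;
}
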
55117diff -urNp linux-2.6.32.45/grsecurity/grsec_sysctl.c linux-2.6.32.45/grsecurity/grsec_sysctl.c
55118--- linux-2.6.32.45/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55119+++ linux-2.6.32.45/grsecurity/grsec_sysctl.c 2011-08-11 19:57:54.000000000 -0400
55120@@ -0,0 +1,479 @@
55121+#include <linux/kernel.h>
55122+#include <linux/sched.h>
55123+#include <linux/sysctl.h>
55124+#include <linux/grsecurity.h>
55125+#include <linux/grinternal.h>
55126+
55127+int
55128+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55129+{
55130+#ifdef CONFIG_GRKERNSEC_SYSCTL
55131+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55132+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55133+ return -EACCES;
55134+ }
55135+#endif
55136+ return 0;
55137+}
55138+
55139+#ifdef CONFIG_GRKERNSEC_ROFS
55140+static int __maybe_unused one = 1;
55141+#endif
55142+
55143+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55144+ctl_table grsecurity_table[] = {
55145+#ifdef CONFIG_GRKERNSEC_SYSCTL
55146+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55147+#ifdef CONFIG_GRKERNSEC_IO
55148+ {
55149+ .ctl_name = CTL_UNNUMBERED,
55150+ .procname = "disable_priv_io",
55151+ .data = &grsec_disable_privio,
55152+ .maxlen = sizeof(int),
55153+ .mode = 0600,
55154+ .proc_handler = &proc_dointvec,
55155+ },
55156+#endif
55157+#endif
55158+#ifdef CONFIG_GRKERNSEC_LINK
55159+ {
55160+ .ctl_name = CTL_UNNUMBERED,
55161+ .procname = "linking_restrictions",
55162+ .data = &grsec_enable_link,
55163+ .maxlen = sizeof(int),
55164+ .mode = 0600,
55165+ .proc_handler = &proc_dointvec,
55166+ },
55167+#endif
55168+#ifdef CONFIG_GRKERNSEC_BRUTE
55169+ {
55170+ .ctl_name = CTL_UNNUMBERED,
55171+ .procname = "deter_bruteforce",
55172+ .data = &grsec_enable_brute,
55173+ .maxlen = sizeof(int),
55174+ .mode = 0600,
55175+ .proc_handler = &proc_dointvec,
55176+ },
55177+#endif
55178+#ifdef CONFIG_GRKERNSEC_FIFO
55179+ {
55180+ .ctl_name = CTL_UNNUMBERED,
55181+ .procname = "fifo_restrictions",
55182+ .data = &grsec_enable_fifo,
55183+ .maxlen = sizeof(int),
55184+ .mode = 0600,
55185+ .proc_handler = &proc_dointvec,
55186+ },
55187+#endif
55188+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55189+ {
55190+ .ctl_name = CTL_UNNUMBERED,
55191+ .procname = "ip_blackhole",
55192+ .data = &grsec_enable_blackhole,
55193+ .maxlen = sizeof(int),
55194+ .mode = 0600,
55195+ .proc_handler = &proc_dointvec,
55196+ },
55197+ {
55198+ .ctl_name = CTL_UNNUMBERED,
55199+ .procname = "lastack_retries",
55200+ .data = &grsec_lastack_retries,
55201+ .maxlen = sizeof(int),
55202+ .mode = 0600,
55203+ .proc_handler = &proc_dointvec,
55204+ },
55205+#endif
55206+#ifdef CONFIG_GRKERNSEC_EXECLOG
55207+ {
55208+ .ctl_name = CTL_UNNUMBERED,
55209+ .procname = "exec_logging",
55210+ .data = &grsec_enable_execlog,
55211+ .maxlen = sizeof(int),
55212+ .mode = 0600,
55213+ .proc_handler = &proc_dointvec,
55214+ },
55215+#endif
55216+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55217+ {
55218+ .ctl_name = CTL_UNNUMBERED,
55219+ .procname = "rwxmap_logging",
55220+ .data = &grsec_enable_log_rwxmaps,
55221+ .maxlen = sizeof(int),
55222+ .mode = 0600,
55223+ .proc_handler = &proc_dointvec,
55224+ },
55225+#endif
55226+#ifdef CONFIG_GRKERNSEC_SIGNAL
55227+ {
55228+ .ctl_name = CTL_UNNUMBERED,
55229+ .procname = "signal_logging",
55230+ .data = &grsec_enable_signal,
55231+ .maxlen = sizeof(int),
55232+ .mode = 0600,
55233+ .proc_handler = &proc_dointvec,
55234+ },
55235+#endif
55236+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55237+ {
55238+ .ctl_name = CTL_UNNUMBERED,
55239+ .procname = "forkfail_logging",
55240+ .data = &grsec_enable_forkfail,
55241+ .maxlen = sizeof(int),
55242+ .mode = 0600,
55243+ .proc_handler = &proc_dointvec,
55244+ },
55245+#endif
55246+#ifdef CONFIG_GRKERNSEC_TIME
55247+ {
55248+ .ctl_name = CTL_UNNUMBERED,
55249+ .procname = "timechange_logging",
55250+ .data = &grsec_enable_time,
55251+ .maxlen = sizeof(int),
55252+ .mode = 0600,
55253+ .proc_handler = &proc_dointvec,
55254+ },
55255+#endif
55256+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55257+ {
55258+ .ctl_name = CTL_UNNUMBERED,
55259+ .procname = "chroot_deny_shmat",
55260+ .data = &grsec_enable_chroot_shmat,
55261+ .maxlen = sizeof(int),
55262+ .mode = 0600,
55263+ .proc_handler = &proc_dointvec,
55264+ },
55265+#endif
55266+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55267+ {
55268+ .ctl_name = CTL_UNNUMBERED,
55269+ .procname = "chroot_deny_unix",
55270+ .data = &grsec_enable_chroot_unix,
55271+ .maxlen = sizeof(int),
55272+ .mode = 0600,
55273+ .proc_handler = &proc_dointvec,
55274+ },
55275+#endif
55276+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55277+ {
55278+ .ctl_name = CTL_UNNUMBERED,
55279+ .procname = "chroot_deny_mount",
55280+ .data = &grsec_enable_chroot_mount,
55281+ .maxlen = sizeof(int),
55282+ .mode = 0600,
55283+ .proc_handler = &proc_dointvec,
55284+ },
55285+#endif
55286+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55287+ {
55288+ .ctl_name = CTL_UNNUMBERED,
55289+ .procname = "chroot_deny_fchdir",
55290+ .data = &grsec_enable_chroot_fchdir,
55291+ .maxlen = sizeof(int),
55292+ .mode = 0600,
55293+ .proc_handler = &proc_dointvec,
55294+ },
55295+#endif
55296+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55297+ {
55298+ .ctl_name = CTL_UNNUMBERED,
55299+ .procname = "chroot_deny_chroot",
55300+ .data = &grsec_enable_chroot_double,
55301+ .maxlen = sizeof(int),
55302+ .mode = 0600,
55303+ .proc_handler = &proc_dointvec,
55304+ },
55305+#endif
55306+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55307+ {
55308+ .ctl_name = CTL_UNNUMBERED,
55309+ .procname = "chroot_deny_pivot",
55310+ .data = &grsec_enable_chroot_pivot,
55311+ .maxlen = sizeof(int),
55312+ .mode = 0600,
55313+ .proc_handler = &proc_dointvec,
55314+ },
55315+#endif
55316+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55317+ {
55318+ .ctl_name = CTL_UNNUMBERED,
55319+ .procname = "chroot_enforce_chdir",
55320+ .data = &grsec_enable_chroot_chdir,
55321+ .maxlen = sizeof(int),
55322+ .mode = 0600,
55323+ .proc_handler = &proc_dointvec,
55324+ },
55325+#endif
55326+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55327+ {
55328+ .ctl_name = CTL_UNNUMBERED,
55329+ .procname = "chroot_deny_chmod",
55330+ .data = &grsec_enable_chroot_chmod,
55331+ .maxlen = sizeof(int),
55332+ .mode = 0600,
55333+ .proc_handler = &proc_dointvec,
55334+ },
55335+#endif
55336+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55337+ {
55338+ .ctl_name = CTL_UNNUMBERED,
55339+ .procname = "chroot_deny_mknod",
55340+ .data = &grsec_enable_chroot_mknod,
55341+ .maxlen = sizeof(int),
55342+ .mode = 0600,
55343+ .proc_handler = &proc_dointvec,
55344+ },
55345+#endif
55346+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55347+ {
55348+ .ctl_name = CTL_UNNUMBERED,
55349+ .procname = "chroot_restrict_nice",
55350+ .data = &grsec_enable_chroot_nice,
55351+ .maxlen = sizeof(int),
55352+ .mode = 0600,
55353+ .proc_handler = &proc_dointvec,
55354+ },
55355+#endif
55356+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55357+ {
55358+ .ctl_name = CTL_UNNUMBERED,
55359+ .procname = "chroot_execlog",
55360+ .data = &grsec_enable_chroot_execlog,
55361+ .maxlen = sizeof(int),
55362+ .mode = 0600,
55363+ .proc_handler = &proc_dointvec,
55364+ },
55365+#endif
55366+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55367+ {
55368+ .ctl_name = CTL_UNNUMBERED,
55369+ .procname = "chroot_caps",
55370+ .data = &grsec_enable_chroot_caps,
55371+ .maxlen = sizeof(int),
55372+ .mode = 0600,
55373+ .proc_handler = &proc_dointvec,
55374+ },
55375+#endif
55376+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55377+ {
55378+ .ctl_name = CTL_UNNUMBERED,
55379+ .procname = "chroot_deny_sysctl",
55380+ .data = &grsec_enable_chroot_sysctl,
55381+ .maxlen = sizeof(int),
55382+ .mode = 0600,
55383+ .proc_handler = &proc_dointvec,
55384+ },
55385+#endif
55386+#ifdef CONFIG_GRKERNSEC_TPE
55387+ {
55388+ .ctl_name = CTL_UNNUMBERED,
55389+ .procname = "tpe",
55390+ .data = &grsec_enable_tpe,
55391+ .maxlen = sizeof(int),
55392+ .mode = 0600,
55393+ .proc_handler = &proc_dointvec,
55394+ },
55395+ {
55396+ .ctl_name = CTL_UNNUMBERED,
55397+ .procname = "tpe_gid",
55398+ .data = &grsec_tpe_gid,
55399+ .maxlen = sizeof(int),
55400+ .mode = 0600,
55401+ .proc_handler = &proc_dointvec,
55402+ },
55403+#endif
55404+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55405+ {
55406+ .ctl_name = CTL_UNNUMBERED,
55407+ .procname = "tpe_invert",
55408+ .data = &grsec_enable_tpe_invert,
55409+ .maxlen = sizeof(int),
55410+ .mode = 0600,
55411+ .proc_handler = &proc_dointvec,
55412+ },
55413+#endif
55414+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55415+ {
55416+ .ctl_name = CTL_UNNUMBERED,
55417+ .procname = "tpe_restrict_all",
55418+ .data = &grsec_enable_tpe_all,
55419+ .maxlen = sizeof(int),
55420+ .mode = 0600,
55421+ .proc_handler = &proc_dointvec,
55422+ },
55423+#endif
55424+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55425+ {
55426+ .ctl_name = CTL_UNNUMBERED,
55427+ .procname = "socket_all",
55428+ .data = &grsec_enable_socket_all,
55429+ .maxlen = sizeof(int),
55430+ .mode = 0600,
55431+ .proc_handler = &proc_dointvec,
55432+ },
55433+ {
55434+ .ctl_name = CTL_UNNUMBERED,
55435+ .procname = "socket_all_gid",
55436+ .data = &grsec_socket_all_gid,
55437+ .maxlen = sizeof(int),
55438+ .mode = 0600,
55439+ .proc_handler = &proc_dointvec,
55440+ },
55441+#endif
55442+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55443+ {
55444+ .ctl_name = CTL_UNNUMBERED,
55445+ .procname = "socket_client",
55446+ .data = &grsec_enable_socket_client,
55447+ .maxlen = sizeof(int),
55448+ .mode = 0600,
55449+ .proc_handler = &proc_dointvec,
55450+ },
55451+ {
55452+ .ctl_name = CTL_UNNUMBERED,
55453+ .procname = "socket_client_gid",
55454+ .data = &grsec_socket_client_gid,
55455+ .maxlen = sizeof(int),
55456+ .mode = 0600,
55457+ .proc_handler = &proc_dointvec,
55458+ },
55459+#endif
55460+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55461+ {
55462+ .ctl_name = CTL_UNNUMBERED,
55463+ .procname = "socket_server",
55464+ .data = &grsec_enable_socket_server,
55465+ .maxlen = sizeof(int),
55466+ .mode = 0600,
55467+ .proc_handler = &proc_dointvec,
55468+ },
55469+ {
55470+ .ctl_name = CTL_UNNUMBERED,
55471+ .procname = "socket_server_gid",
55472+ .data = &grsec_socket_server_gid,
55473+ .maxlen = sizeof(int),
55474+ .mode = 0600,
55475+ .proc_handler = &proc_dointvec,
55476+ },
55477+#endif
55478+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55479+ {
55480+ .ctl_name = CTL_UNNUMBERED,
55481+ .procname = "audit_group",
55482+ .data = &grsec_enable_group,
55483+ .maxlen = sizeof(int),
55484+ .mode = 0600,
55485+ .proc_handler = &proc_dointvec,
55486+ },
55487+ {
55488+ .ctl_name = CTL_UNNUMBERED,
55489+ .procname = "audit_gid",
55490+ .data = &grsec_audit_gid,
55491+ .maxlen = sizeof(int),
55492+ .mode = 0600,
55493+ .proc_handler = &proc_dointvec,
55494+ },
55495+#endif
55496+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55497+ {
55498+ .ctl_name = CTL_UNNUMBERED,
55499+ .procname = "audit_chdir",
55500+ .data = &grsec_enable_chdir,
55501+ .maxlen = sizeof(int),
55502+ .mode = 0600,
55503+ .proc_handler = &proc_dointvec,
55504+ },
55505+#endif
55506+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55507+ {
55508+ .ctl_name = CTL_UNNUMBERED,
55509+ .procname = "audit_mount",
55510+ .data = &grsec_enable_mount,
55511+ .maxlen = sizeof(int),
55512+ .mode = 0600,
55513+ .proc_handler = &proc_dointvec,
55514+ },
55515+#endif
55516+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55517+ {
55518+ .ctl_name = CTL_UNNUMBERED,
55519+ .procname = "audit_textrel",
55520+ .data = &grsec_enable_audit_textrel,
55521+ .maxlen = sizeof(int),
55522+ .mode = 0600,
55523+ .proc_handler = &proc_dointvec,
55524+ },
55525+#endif
55526+#ifdef CONFIG_GRKERNSEC_DMESG
55527+ {
55528+ .ctl_name = CTL_UNNUMBERED,
55529+ .procname = "dmesg",
55530+ .data = &grsec_enable_dmesg,
55531+ .maxlen = sizeof(int),
55532+ .mode = 0600,
55533+ .proc_handler = &proc_dointvec,
55534+ },
55535+#endif
55536+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55537+ {
55538+ .ctl_name = CTL_UNNUMBERED,
55539+ .procname = "chroot_findtask",
55540+ .data = &grsec_enable_chroot_findtask,
55541+ .maxlen = sizeof(int),
55542+ .mode = 0600,
55543+ .proc_handler = &proc_dointvec,
55544+ },
55545+#endif
55546+#ifdef CONFIG_GRKERNSEC_RESLOG
55547+ {
55548+ .ctl_name = CTL_UNNUMBERED,
55549+ .procname = "resource_logging",
55550+ .data = &grsec_resource_logging,
55551+ .maxlen = sizeof(int),
55552+ .mode = 0600,
55553+ .proc_handler = &proc_dointvec,
55554+ },
55555+#endif
55556+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55557+ {
55558+ .ctl_name = CTL_UNNUMBERED,
55559+ .procname = "audit_ptrace",
55560+ .data = &grsec_enable_audit_ptrace,
55561+ .maxlen = sizeof(int),
55562+ .mode = 0600,
55563+ .proc_handler = &proc_dointvec,
55564+ },
55565+#endif
55566+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55567+ {
55568+ .ctl_name = CTL_UNNUMBERED,
55569+ .procname = "harden_ptrace",
55570+ .data = &grsec_enable_harden_ptrace,
55571+ .maxlen = sizeof(int),
55572+ .mode = 0600,
55573+ .proc_handler = &proc_dointvec,
55574+ },
55575+#endif
55576+ {
55577+ .ctl_name = CTL_UNNUMBERED,
55578+ .procname = "grsec_lock",
55579+ .data = &grsec_lock,
55580+ .maxlen = sizeof(int),
55581+ .mode = 0600,
55582+ .proc_handler = &proc_dointvec,
55583+ },
55584+#endif
55585+#ifdef CONFIG_GRKERNSEC_ROFS
55586+ {
55587+ .ctl_name = CTL_UNNUMBERED,
55588+ .procname = "romount_protect",
55589+ .data = &grsec_enable_rofs,
55590+ .maxlen = sizeof(int),
55591+ .mode = 0600,
55592+ .proc_handler = &proc_dointvec_minmax,
55593+ .extra1 = &one,
55594+ .extra2 = &one,
55595+ },
55596+#endif
55597+ { .ctl_name = 0 }
55598+};
55599+#endif
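
Every entry in grsecurity_table above is a root-only (mode 0600) integer handled by proc_dointvec, so each knob can be flipped by writing to its /proc/sys file. A sketch of doing that from user space, assuming the table is registered under kernel/grsecurity (the registration itself is not part of this file):

#include <stdio.h>

/* write "1" to one of the integer knobs declared above; the path assumes
 * the table sits under kernel/grsecurity, which is done outside this hunk */
static int enable_knob(const char *name)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");		/* mode 0600: writable by root only */
	if (!f)
		return -1;
	fputs("1\n", f);
	return fclose(f);
}

int main(void)
{
	enable_knob("tpe");			/* enable trusted path execution */
	return enable_knob("grsec_lock");	/* then refuse further changes */
}

Once grsec_lock has been set to a non-zero value, gr_handle_sysctl_mod() at the top of this file rejects any further write under the grsecurity directory with -EACCES.
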
55600diff -urNp linux-2.6.32.45/grsecurity/grsec_time.c linux-2.6.32.45/grsecurity/grsec_time.c
55601--- linux-2.6.32.45/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55602+++ linux-2.6.32.45/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55603@@ -0,0 +1,16 @@
55604+#include <linux/kernel.h>
55605+#include <linux/sched.h>
55606+#include <linux/grinternal.h>
55607+#include <linux/module.h>
55608+
55609+void
55610+gr_log_timechange(void)
55611+{
55612+#ifdef CONFIG_GRKERNSEC_TIME
55613+ if (grsec_enable_time)
55614+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55615+#endif
55616+ return;
55617+}
55618+
55619+EXPORT_SYMBOL(gr_log_timechange);
55620diff -urNp linux-2.6.32.45/grsecurity/grsec_tpe.c linux-2.6.32.45/grsecurity/grsec_tpe.c
55621--- linux-2.6.32.45/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55622+++ linux-2.6.32.45/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55623@@ -0,0 +1,39 @@
55624+#include <linux/kernel.h>
55625+#include <linux/sched.h>
55626+#include <linux/file.h>
55627+#include <linux/fs.h>
55628+#include <linux/grinternal.h>
55629+
55630+extern int gr_acl_tpe_check(void);
55631+
55632+int
55633+gr_tpe_allow(const struct file *file)
55634+{
55635+#ifdef CONFIG_GRKERNSEC
55636+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55637+ const struct cred *cred = current_cred();
55638+
55639+ if (cred->uid && ((grsec_enable_tpe &&
55640+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55641+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55642+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55643+#else
55644+ in_group_p(grsec_tpe_gid)
55645+#endif
55646+ ) || gr_acl_tpe_check()) &&
55647+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55648+ (inode->i_mode & S_IWOTH))))) {
55649+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55650+ return 0;
55651+ }
55652+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55653+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55654+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55655+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55656+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55657+ return 0;
55658+ }
55659+#endif
55660+#endif
55661+ return 1;
55662+}
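
Stripped of the RBAC hook and the invert/TPE_ALL variants, the main test in gr_tpe_allow() above denies execution to a covered, non-root user unless the binary's parent directory is owned by root and is neither group- nor world-writable. A condensed model of that decision:

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

/* simplified model of the main gr_tpe_allow() test above; dir_uid and
 * dir_mode describe the directory containing the binary being executed */
static bool tpe_allows_exec(uid_t uid, bool covered_by_tpe,
			    uid_t dir_uid, mode_t dir_mode)
{
	if (uid == 0 || !covered_by_tpe)
		return true;			/* root and trusted users pass */
	if (dir_uid != 0)
		return false;			/* directory not owned by root */
	if (dir_mode & (S_IWGRP | S_IWOTH))
		return false;			/* root-owned but group/world-writable */
	return true;
}

int main(void)
{
	/* a /usr/bin-style directory: root-owned, mode 0755 -> allowed */
	return tpe_allows_exec(1000, true, 0, 0755) ? 0 : 1;
}
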
55663diff -urNp linux-2.6.32.45/grsecurity/grsum.c linux-2.6.32.45/grsecurity/grsum.c
55664--- linux-2.6.32.45/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55665+++ linux-2.6.32.45/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55666@@ -0,0 +1,61 @@
55667+#include <linux/err.h>
55668+#include <linux/kernel.h>
55669+#include <linux/sched.h>
55670+#include <linux/mm.h>
55671+#include <linux/scatterlist.h>
55672+#include <linux/crypto.h>
55673+#include <linux/gracl.h>
55674+
55675+
55676+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55677+#error "crypto and sha256 must be built into the kernel"
55678+#endif
55679+
55680+int
55681+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
55682+{
55683+ char *p;
55684+ struct crypto_hash *tfm;
55685+ struct hash_desc desc;
55686+ struct scatterlist sg;
55687+ unsigned char temp_sum[GR_SHA_LEN];
55688+ volatile int retval = 0;
55689+ volatile int dummy = 0;
55690+ unsigned int i;
55691+
55692+ sg_init_table(&sg, 1);
55693+
55694+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
55695+ if (IS_ERR(tfm)) {
55696+ /* should never happen, since sha256 should be built in */
55697+ return 1;
55698+ }
55699+
55700+ desc.tfm = tfm;
55701+ desc.flags = 0;
55702+
55703+ crypto_hash_init(&desc);
55704+
55705+ p = salt;
55706+ sg_set_buf(&sg, p, GR_SALT_LEN);
55707+ crypto_hash_update(&desc, &sg, sg.length);
55708+
55709+ p = entry->pw;
55710+ sg_set_buf(&sg, p, strlen(p));
55711+
55712+ crypto_hash_update(&desc, &sg, sg.length);
55713+
55714+ crypto_hash_final(&desc, temp_sum);
55715+
55716+ memset(entry->pw, 0, GR_PW_LEN);
55717+
55718+ for (i = 0; i < GR_SHA_LEN; i++)
55719+ if (sum[i] != temp_sum[i])
55720+ retval = 1;
55721+ else
55722+ dummy = 1; // waste a cycle
55723+
55724+ crypto_free_hash(tfm);
55725+
55726+ return retval;
55727+}
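
The comparison loop in chkpw() above deliberately visits every byte of the digest instead of returning at the first mismatch, so the time taken does not reveal how many leading bytes were correct. The same pattern in isolation, with the length standing in for GR_SHA_LEN (32 bytes for SHA-256):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* compare two digests without an early exit, as the loop in chkpw() does */
static int digests_differ(const unsigned char *a, const unsigned char *b, size_t len)
{
	volatile int retval = 0;
	volatile int dummy = 0;
	size_t i;

	for (i = 0; i < len; i++)
		if (a[i] != b[i])
			retval = 1;
		else
			dummy = 1;	/* waste a cycle so both branches do similar work */

	(void)dummy;
	return retval;
}

int main(void)
{
	unsigned char x[32], y[32];

	memset(x, 0xab, sizeof(x));
	memset(y, 0xab, sizeof(y));
	printf("%d\n", digests_differ(x, y, sizeof(x)));	/* prints 0 */
	return 0;
}
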
55728diff -urNp linux-2.6.32.45/grsecurity/Kconfig linux-2.6.32.45/grsecurity/Kconfig
55729--- linux-2.6.32.45/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
55730+++ linux-2.6.32.45/grsecurity/Kconfig 2011-08-17 19:04:25.000000000 -0400
55731@@ -0,0 +1,1037 @@
55732+#
55733+# grsecurity configuration
55734+#
55735+
55736+menu "Grsecurity"
55737+
55738+config GRKERNSEC
55739+ bool "Grsecurity"
55740+ select CRYPTO
55741+ select CRYPTO_SHA256
55742+ help
55743+ If you say Y here, you will be able to configure many features
55744+ that will enhance the security of your system. It is highly
55745+ recommended that you say Y here and read through the help
55746+ for each option so that you fully understand the features and
55747+ can evaluate their usefulness for your machine.
55748+
55749+choice
55750+ prompt "Security Level"
55751+ depends on GRKERNSEC
55752+ default GRKERNSEC_CUSTOM
55753+
55754+config GRKERNSEC_LOW
55755+ bool "Low"
55756+ select GRKERNSEC_LINK
55757+ select GRKERNSEC_FIFO
55758+ select GRKERNSEC_RANDNET
55759+ select GRKERNSEC_DMESG
55760+ select GRKERNSEC_CHROOT
55761+ select GRKERNSEC_CHROOT_CHDIR
55762+
55763+ help
55764+ If you choose this option, several of the grsecurity options will
55765+ be enabled that will give you greater protection against a number
55766+ of attacks, while assuring that none of your software will have any
55767+ conflicts with the additional security measures. If you run a lot
55768+ of unusual software, or you are having problems with the higher
55769+ security levels, you should say Y here. With this option, the
55770+ following features are enabled:
55771+
55772+ - Linking restrictions
55773+ - FIFO restrictions
55774+ - Restricted dmesg
55775+ - Enforced chdir("/") on chroot
55776+ - Runtime module disabling
55777+
55778+config GRKERNSEC_MEDIUM
55779+ bool "Medium"
55780+ select PAX
55781+ select PAX_EI_PAX
55782+ select PAX_PT_PAX_FLAGS
55783+ select PAX_HAVE_ACL_FLAGS
55784+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55785+ select GRKERNSEC_CHROOT
55786+ select GRKERNSEC_CHROOT_SYSCTL
55787+ select GRKERNSEC_LINK
55788+ select GRKERNSEC_FIFO
55789+ select GRKERNSEC_DMESG
55790+ select GRKERNSEC_RANDNET
55791+ select GRKERNSEC_FORKFAIL
55792+ select GRKERNSEC_TIME
55793+ select GRKERNSEC_SIGNAL
55794+ select GRKERNSEC_CHROOT
55795+ select GRKERNSEC_CHROOT_UNIX
55796+ select GRKERNSEC_CHROOT_MOUNT
55797+ select GRKERNSEC_CHROOT_PIVOT
55798+ select GRKERNSEC_CHROOT_DOUBLE
55799+ select GRKERNSEC_CHROOT_CHDIR
55800+ select GRKERNSEC_CHROOT_MKNOD
55801+ select GRKERNSEC_PROC
55802+ select GRKERNSEC_PROC_USERGROUP
55803+ select PAX_RANDUSTACK
55804+ select PAX_ASLR
55805+ select PAX_RANDMMAP
55806+ select PAX_REFCOUNT if (X86 || SPARC64)
55807+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55808+
55809+ help
55810+ If you say Y here, several features in addition to those included
55811+ in the low additional security level will be enabled. These
55812+ features provide even more security to your system, though in rare
55813+ cases they may be incompatible with very old or poorly written
55814+ software. If you enable this option, make sure that your auth
55815+ service (identd) is running as gid 1001. With this option,
55816+ the following features (in addition to those provided in the
55817+ low additional security level) will be enabled:
55818+
55819+ - Failed fork logging
55820+ - Time change logging
55821+ - Signal logging
55822+ - Deny mounts in chroot
55823+ - Deny double chrooting
55824+ - Deny sysctl writes in chroot
55825+ - Deny mknod in chroot
55826+ - Deny access to abstract AF_UNIX sockets out of chroot
55827+ - Deny pivot_root in chroot
55828+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
55829+ - /proc restrictions with special GID set to 10 (usually wheel)
55830+ - Address Space Layout Randomization (ASLR)
55831+ - Prevent exploitation of most refcount overflows
55832+ - Bounds checking of copying between the kernel and userland
55833+
55834+config GRKERNSEC_HIGH
55835+ bool "High"
55836+ select GRKERNSEC_LINK
55837+ select GRKERNSEC_FIFO
55838+ select GRKERNSEC_DMESG
55839+ select GRKERNSEC_FORKFAIL
55840+ select GRKERNSEC_TIME
55841+ select GRKERNSEC_SIGNAL
55842+ select GRKERNSEC_CHROOT
55843+ select GRKERNSEC_CHROOT_SHMAT
55844+ select GRKERNSEC_CHROOT_UNIX
55845+ select GRKERNSEC_CHROOT_MOUNT
55846+ select GRKERNSEC_CHROOT_FCHDIR
55847+ select GRKERNSEC_CHROOT_PIVOT
55848+ select GRKERNSEC_CHROOT_DOUBLE
55849+ select GRKERNSEC_CHROOT_CHDIR
55850+ select GRKERNSEC_CHROOT_MKNOD
55851+ select GRKERNSEC_CHROOT_CAPS
55852+ select GRKERNSEC_CHROOT_SYSCTL
55853+ select GRKERNSEC_CHROOT_FINDTASK
55854+ select GRKERNSEC_SYSFS_RESTRICT
55855+ select GRKERNSEC_PROC
55856+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55857+ select GRKERNSEC_HIDESYM
55858+ select GRKERNSEC_BRUTE
55859+ select GRKERNSEC_PROC_USERGROUP
55860+ select GRKERNSEC_KMEM
55861+ select GRKERNSEC_RESLOG
55862+ select GRKERNSEC_RANDNET
55863+ select GRKERNSEC_PROC_ADD
55864+ select GRKERNSEC_CHROOT_CHMOD
55865+ select GRKERNSEC_CHROOT_NICE
55866+ select GRKERNSEC_AUDIT_MOUNT
55867+ select GRKERNSEC_MODHARDEN if (MODULES)
55868+ select GRKERNSEC_HARDEN_PTRACE
55869+ select GRKERNSEC_VM86 if (X86_32)
55870+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55871+ select PAX
55872+ select PAX_RANDUSTACK
55873+ select PAX_ASLR
55874+ select PAX_RANDMMAP
55875+ select PAX_NOEXEC
55876+ select PAX_MPROTECT
55877+ select PAX_EI_PAX
55878+ select PAX_PT_PAX_FLAGS
55879+ select PAX_HAVE_ACL_FLAGS
55880+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55881+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55882+ select PAX_RANDKSTACK if (X86_TSC && X86)
55883+ select PAX_SEGMEXEC if (X86_32)
55884+ select PAX_PAGEEXEC
55885+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55886+ select PAX_EMUTRAMP if (PARISC)
55887+ select PAX_EMUSIGRT if (PARISC)
55888+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55889+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55890+ select PAX_REFCOUNT if (X86 || SPARC64)
55891+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55892+ help
55893+ If you say Y here, many of the features of grsecurity will be
55894+ enabled, which will protect you against many kinds of attacks
55895+ against your system. The heightened security comes at a cost
55896+ of an increased chance of incompatibilities with rare software
55897+ on your machine. Since this security level enables PaX, you should
55898+ view <http://pax.grsecurity.net> and read about the PaX
55899+ project. While you are there, download chpax and run it on
55900+ binaries that cause problems with PaX. Also remember that
55901+ since the /proc restrictions are enabled, you must run your
55902+ identd as gid 1001. This security level enables the following
55903+ features in addition to those listed in the low and medium
55904+ security levels:
55905+
55906+ - Additional /proc restrictions
55907+ - Chmod restrictions in chroot
55908+ - No signals, ptrace, or viewing of processes outside of chroot
55909+ - Capability restrictions in chroot
55910+ - Deny fchdir out of chroot
55911+ - Priority restrictions in chroot
55912+ - Segmentation-based implementation of PaX
55913+ - Mprotect restrictions
55914+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55915+ - Kernel stack randomization
55916+ - Mount/unmount/remount logging
55917+ - Kernel symbol hiding
55918+ - Prevention of memory exhaustion-based exploits
55919+ - Hardening of module auto-loading
55920+ - Ptrace restrictions
55921+ - Restricted vm86 mode
55922+ - Restricted sysfs/debugfs
55923+ - Active kernel exploit response
55924+
55925+config GRKERNSEC_CUSTOM
55926+ bool "Custom"
55927+ help
55928+ If you say Y here, you will be able to configure every grsecurity
55929+ option, which allows you to enable many more features that aren't
55930+ covered in the basic security levels. These additional features
55931+ include TPE, socket restrictions, and the sysctl system for
55932+ grsecurity. It is advised that you read through the help for
55933+ each option to determine its usefulness in your situation.
55934+
55935+endchoice
55936+
55937+menu "Address Space Protection"
55938+depends on GRKERNSEC
55939+
55940+config GRKERNSEC_KMEM
55941+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
55942+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55943+ help
55944+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55945+ be written to via mmap or otherwise to modify the running kernel.
55946+ /dev/port will also not be allowed to be opened. If you have module
55947+ support disabled, enabling this will close up four ways that are
55948+ currently used to insert malicious code into the running kernel.
55949+ Even with all these features enabled, we still highly recommend that
55950+ you use the RBAC system, as it is still possible for an attacker to
55951+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55952+ If you are not using XFree86, you may be able to stop this additional
55953+ case by enabling the 'Disable privileged I/O' option. Though nothing
55954+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55955+ but only to video memory, which is the only writing we allow in this
55956+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55957+ not be allowed to mprotect it with PROT_WRITE later.
55958+ It is highly recommended that you say Y here if you meet all the
55959+ conditions above.
55960+
55961+config GRKERNSEC_VM86
55962+ bool "Restrict VM86 mode"
55963+ depends on X86_32
55964+
55965+ help
55966+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55967+ make use of a special execution mode on 32bit x86 processors called
55968+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55969+ video cards and will still work with this option enabled. The purpose
55970+ of the option is to prevent exploitation of emulation errors in
55971+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55972+ Nearly all users should be able to enable this option.
55973+
55974+config GRKERNSEC_IO
55975+ bool "Disable privileged I/O"
55976+ depends on X86
55977+ select RTC_CLASS
55978+ select RTC_INTF_DEV
55979+ select RTC_DRV_CMOS
55980+
55981+ help
55982+ If you say Y here, all ioperm and iopl calls will return an error.
55983+ Ioperm and iopl can be used to modify the running kernel.
55984+ Unfortunately, some programs need this access to operate properly,
55985+ the most notable of which are XFree86 and hwclock. hwclock can be
55986+ remedied by having RTC support in the kernel, so real-time
55987+ clock support is enabled if this option is enabled, to ensure
55988+ that hwclock operates correctly. XFree86 still will not
55989+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55990+ IF YOU USE XFree86. If you use XFree86 and you still want to
55991+ protect your kernel against modification, use the RBAC system.
55992+
55993+config GRKERNSEC_PROC_MEMMAP
55994+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55995+ default y if (PAX_NOEXEC || PAX_ASLR)
55996+ depends on PAX_NOEXEC || PAX_ASLR
55997+ help
55998+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55999+ give no information about the addresses of the task's mappings if
56000+ PaX features that rely on random addresses are enabled on the task.
56001+ If you use PaX it is greatly recommended that you say Y here as it
56002+ closes up a hole that makes the full ASLR useless for suid
56003+ binaries.
56004+
56005+config GRKERNSEC_BRUTE
56006+ bool "Deter exploit bruteforcing"
56007+ help
56008+ If you say Y here, attempts to bruteforce exploits against forking
56009+ daemons such as apache or sshd, as well as against suid/sgid binaries
56010+ will be deterred. When a child of a forking daemon is killed by PaX
56011+ or crashes due to an illegal instruction or other suspicious signal,
56012+ the parent process will be delayed 30 seconds upon every subsequent
56013+ fork until the administrator is able to assess the situation and
56014+ restart the daemon.
56015+ In the suid/sgid case, the attempt is logged, the user has all their
56016+ processes terminated, and they are prevented from executing any further
56017+ processes for 15 minutes.
56018+ It is recommended that you also enable signal logging in the auditing
56019+ section so that logs are generated when a process triggers a suspicious
56020+ signal.
56021+ If the sysctl option is enabled, a sysctl option with name
56022+ "deter_bruteforce" is created.
56023+
56024+config GRKERNSEC_MODHARDEN
56025+ bool "Harden module auto-loading"
56026+ depends on MODULES
56027+ help
56028+ If you say Y here, module auto-loading in response to use of some
56029+ feature implemented by an unloaded module will be restricted to
56030+ root users. Enabling this option helps defend against attacks
56031+ by unprivileged users who abuse the auto-loading behavior to
56032+ cause a vulnerable module to load that is then exploited.
56033+
56034+ If this option prevents a legitimate use of auto-loading for a
56035+ non-root user, the administrator can execute modprobe manually
56036+ with the exact name of the module mentioned in the alert log.
56037+ Alternatively, the administrator can add the module to the list
56038+ of modules loaded at boot by modifying init scripts.
56039+
56040+ Modification of init scripts will most likely be needed on
56041+ Ubuntu servers with encrypted home directory support enabled,
56042+ as the first non-root user logging in will cause the ecb(aes),
56043+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56044+
56045+config GRKERNSEC_HIDESYM
56046+ bool "Hide kernel symbols"
56047+ help
56048+ If you say Y here, getting information on loaded modules, and
56049+ displaying all kernel symbols through a syscall will be restricted
56050+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56051+ /proc/kallsyms will be restricted to the root user. The RBAC
56052+ system can hide that entry even from root.
56053+
56054+ This option also prevents leaking of kernel addresses through
56055+ several /proc entries.
56056+
56057+ Note that this option is only effective provided the following
56058+ conditions are met:
56059+ 1) The kernel using grsecurity is not precompiled by some distribution
56060+ 2) You have also enabled GRKERNSEC_DMESG
56061+ 3) You are using the RBAC system and hiding other files such as your
56062+ kernel image and System.map. Alternatively, enabling this option
56063+ causes the permissions on /boot, /lib/modules, and the kernel
56064+ source directory to change at compile time to prevent
56065+ reading by non-root users.
56066+ If the above conditions are met, this option will aid in providing a
56067+ useful protection against local kernel exploitation of overflows
56068+ and arbitrary read/write vulnerabilities.
56069+
56070+config GRKERNSEC_KERN_LOCKOUT
56071+ bool "Active kernel exploit response"
56072+ depends on X86 || ARM || PPC || SPARC
56073+ help
56074+ If you say Y here, when a PaX alert is triggered due to suspicious
56075+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56076+ or an OOPs occurs due to bad memory accesses, instead of just
56077+ terminating the offending process (and potentially allowing
56078+ a subsequent exploit from the same user), we will take one of two
56079+ actions:
56080+ If the user was root, we will panic the system
56081+ If the user was non-root, we will log the attempt, terminate
56082+ all processes owned by the user, then prevent them from creating
56083+ any new processes until the system is restarted
56084+ This deters repeated kernel exploitation/bruteforcing attempts
56085+ and is useful for later forensics.
56086+
56087+endmenu
56088+menu "Role Based Access Control Options"
56089+depends on GRKERNSEC
56090+
56091+config GRKERNSEC_RBAC_DEBUG
56092+ bool
56093+
56094+config GRKERNSEC_NO_RBAC
56095+ bool "Disable RBAC system"
56096+ help
56097+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56098+ preventing the RBAC system from being enabled. You should only say Y
56099+ here if you have no intention of using the RBAC system, so as to prevent
56100+ an attacker with root access from misusing the RBAC system to hide files
56101+ and processes when loadable module support and /dev/[k]mem have been
56102+ locked down.
56103+
56104+config GRKERNSEC_ACL_HIDEKERN
56105+ bool "Hide kernel processes"
56106+ help
56107+ If you say Y here, all kernel threads will be hidden to all
56108+ processes but those whose subject has the "view hidden processes"
56109+ flag.
56110+
56111+config GRKERNSEC_ACL_MAXTRIES
56112+ int "Maximum tries before password lockout"
56113+ default 3
56114+ help
56115+ This option enforces the maximum number of times a user can attempt
56116+ to authorize themselves with the grsecurity RBAC system before being
56117+ denied the ability to attempt authorization again for a specified time.
56118+ The lower the number, the harder it will be to brute-force a password.
56119+
56120+config GRKERNSEC_ACL_TIMEOUT
56121+ int "Time to wait after max password tries, in seconds"
56122+ default 30
56123+ help
56124+ This option specifies the time the user must wait after attempting to
56125+ authorize to the RBAC system with the maximum number of invalid
56126+ passwords. The higher the number, the harder it will be to brute-force
56127+ a password.
56128+
56129+endmenu
56130+menu "Filesystem Protections"
56131+depends on GRKERNSEC
56132+
56133+config GRKERNSEC_PROC
56134+ bool "Proc restrictions"
56135+ help
56136+ If you say Y here, the permissions of the /proc filesystem
56137+ will be altered to enhance system security and privacy. You MUST
56138+ choose either a user only restriction or a user and group restriction.
56139+ Depending upon the option you choose, you can either restrict users to
56140+ see only the processes they themselves run, or choose a group that can
56141+ view all processes and files normally restricted to root if you choose
56142+ the "restrict to user only" option. NOTE: If you're running identd as
56143+ a non-root user, you will have to run it as the group you specify here.
56144+
56145+config GRKERNSEC_PROC_USER
56146+ bool "Restrict /proc to user only"
56147+ depends on GRKERNSEC_PROC
56148+ help
56149+ If you say Y here, non-root users will only be able to view their own
56150+ processes, and will be restricted from viewing network-related
56151+ information and kernel symbol and module information.
56152+
56153+config GRKERNSEC_PROC_USERGROUP
56154+ bool "Allow special group"
56155+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56156+ help
56157+ If you say Y here, you will be able to select a group that will be
56158+ able to view all processes and network-related information. If you've
56159+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56160+ remain hidden. This option is useful if you want to run identd as
56161+ a non-root user.
56162+
56163+config GRKERNSEC_PROC_GID
56164+ int "GID for special group"
56165+ depends on GRKERNSEC_PROC_USERGROUP
56166+ default 1001
56167+
56168+config GRKERNSEC_PROC_ADD
56169+ bool "Additional restrictions"
56170+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56171+ help
56172+ If you say Y here, additional restrictions will be placed on
56173+ /proc that keep normal users from viewing device information and
56174+ slabinfo information that could be useful for exploits.
56175+
56176+config GRKERNSEC_LINK
56177+ bool "Linking restrictions"
56178+ help
56179+ If you say Y here, /tmp race exploits will be prevented, since users
56180+ will no longer be able to follow symlinks owned by other users in
56181+ world-writable +t directories (e.g. /tmp), unless the owner of the
56182+ symlink is the owner of the directory. Users will also not be
56183+ able to hardlink to files they do not own. If the sysctl option is
56184+ enabled, a sysctl option with name "linking_restrictions" is created.
56185+
56186+config GRKERNSEC_FIFO
56187+ bool "FIFO restrictions"
56188+ help
56189+ If you say Y here, users will not be able to write to FIFOs they don't
56190+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56191+ the FIFO is also the owner of the directory it's held in. If the sysctl
56192+ option is enabled, a sysctl option with name "fifo_restrictions" is
56193+ created.
56194+
56195+config GRKERNSEC_SYSFS_RESTRICT
56196+ bool "Sysfs/debugfs restriction"
56197+ depends on SYSFS
56198+ help
56199+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56200+ any filesystem normally mounted under it (e.g. debugfs) will only
56201+ be accessible by root. These filesystems generally provide access
56202+ to hardware and debug information that isn't appropriate for unprivileged
56203+ users of the system. Sysfs and debugfs have also become a large source
56204+ of new vulnerabilities, ranging from infoleaks to local compromise.
56205+ There has been very little oversight with an eye toward security involved
56206+ in adding new exporters of information to these filesystems, so their
56207+ use is discouraged.
56208+ This option is equivalent to a chmod 0700 of the mount paths.
56209+
56210+config GRKERNSEC_ROFS
56211+ bool "Runtime read-only mount protection"
56212+ help
56213+ If you say Y here, a sysctl option with name "romount_protect" will
56214+ be created. By setting this option to 1 at runtime, filesystems
56215+ will be protected in the following ways:
56216+ * No new writable mounts will be allowed
56217+ * Existing read-only mounts won't be able to be remounted read/write
56218+ * Write operations will be denied on all block devices
56219+ This option acts independently of grsec_lock: once it is set to 1,
56220+ it cannot be turned off. Therefore, please be mindful of the resulting
56221+ behavior if this option is enabled in an init script on a read-only
56222+ filesystem. This feature is mainly intended for secure embedded systems.
56223+
56224+config GRKERNSEC_CHROOT
56225+ bool "Chroot jail restrictions"
56226+ help
56227+ If you say Y here, you will be able to choose several options that will
56228+ make breaking out of a chrooted jail much more difficult. If you
56229+ encounter no software incompatibilities with the following options, it
56230+ is recommended that you enable each one.
56231+
56232+config GRKERNSEC_CHROOT_MOUNT
56233+ bool "Deny mounts"
56234+ depends on GRKERNSEC_CHROOT
56235+ help
56236+ If you say Y here, processes inside a chroot will not be able to
56237+ mount or remount filesystems. If the sysctl option is enabled, a
56238+ sysctl option with name "chroot_deny_mount" is created.
56239+
56240+config GRKERNSEC_CHROOT_DOUBLE
56241+ bool "Deny double-chroots"
56242+ depends on GRKERNSEC_CHROOT
56243+ help
56244+ If you say Y here, processes inside a chroot will not be able to chroot
56245+ again outside the chroot. This is a widely used method of breaking
56246+ out of a chroot jail and should not be allowed. If the sysctl
56247+ option is enabled, a sysctl option with name
56248+ "chroot_deny_chroot" is created.
56249+
56250+config GRKERNSEC_CHROOT_PIVOT
56251+ bool "Deny pivot_root in chroot"
56252+ depends on GRKERNSEC_CHROOT
56253+ help
56254+ If you say Y here, processes inside a chroot will not be able to use
56255+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56256+ works similarly to chroot in that it changes the root filesystem. This
56257+ function could be misused in a chrooted process to attempt to break out
56258+ of the chroot, and therefore should not be allowed. If the sysctl
56259+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56260+ created.
56261+
56262+config GRKERNSEC_CHROOT_CHDIR
56263+ bool "Enforce chdir(\"/\") on all chroots"
56264+ depends on GRKERNSEC_CHROOT
56265+ help
56266+ If you say Y here, the current working directory of all newly-chrooted
56267+ applications will be set to the root directory of the chroot.
56268+ The man page on chroot(2) states:
56269+ Note that this call does not change the current working
56270+ directory, so that `.' can be outside the tree rooted at
56271+ `/'. In particular, the super-user can escape from a
56272+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56273+
56274+ It is recommended that you say Y here, since it's not known to break
56275+ any software. If the sysctl option is enabled, a sysctl option with
56276+ name "chroot_enforce_chdir" is created.
56277+
56278+config GRKERNSEC_CHROOT_CHMOD
56279+ bool "Deny (f)chmod +s"
56280+ depends on GRKERNSEC_CHROOT
56281+ help
56282+ If you say Y here, processes inside a chroot will not be able to chmod
56283+ or fchmod files to make them have suid or sgid bits. This protects
56284+ against another published method of breaking a chroot. If the sysctl
56285+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56286+ created.
56287+
56288+config GRKERNSEC_CHROOT_FCHDIR
56289+ bool "Deny fchdir out of chroot"
56290+ depends on GRKERNSEC_CHROOT
56291+ help
56292+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56293+ to a file descriptor of the chrooting process that points to a directory
56294+ outside the filesystem will be stopped. If the sysctl option
56295+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56296+
56297+config GRKERNSEC_CHROOT_MKNOD
56298+ bool "Deny mknod"
56299+ depends on GRKERNSEC_CHROOT
56300+ help
56301+ If you say Y here, processes inside a chroot will not be allowed to
56302+ mknod. The problem with using mknod inside a chroot is that it
56303+ would allow an attacker to create a device entry that is the same
56304+ as one on the physical root of your system, which could be anything
56305+ from the console device to a device for your hard drive (which
56306+ they could then use to wipe the drive or steal data). It is recommended
56307+ that you say Y here, unless you run into software incompatibilities.
56308+ If the sysctl option is enabled, a sysctl option with name
56309+ "chroot_deny_mknod" is created.
56310+
56311+config GRKERNSEC_CHROOT_SHMAT
56312+ bool "Deny shmat() out of chroot"
56313+ depends on GRKERNSEC_CHROOT
56314+ help
56315+ If you say Y here, processes inside a chroot will not be able to attach
56316+ to shared memory segments that were created outside of the chroot jail.
56317+ It is recommended that you say Y here. If the sysctl option is enabled,
56318+ a sysctl option with name "chroot_deny_shmat" is created.
56319+
56320+config GRKERNSEC_CHROOT_UNIX
56321+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56322+ depends on GRKERNSEC_CHROOT
56323+ help
56324+ If you say Y here, processes inside a chroot will not be able to
56325+ connect to abstract (meaning not belonging to a filesystem) Unix
56326+ domain sockets that were bound outside of a chroot. It is recommended
56327+ that you say Y here. If the sysctl option is enabled, a sysctl option
56328+ with name "chroot_deny_unix" is created.
56329+
56330+config GRKERNSEC_CHROOT_FINDTASK
56331+ bool "Protect outside processes"
56332+ depends on GRKERNSEC_CHROOT
56333+ help
56334+ If you say Y here, processes inside a chroot will not be able to
56335+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56336+ getsid, or view any process outside of the chroot. If the sysctl
56337+ option is enabled, a sysctl option with name "chroot_findtask" is
56338+ created.
56339+
56340+config GRKERNSEC_CHROOT_NICE
56341+ bool "Restrict priority changes"
56342+ depends on GRKERNSEC_CHROOT
56343+ help
56344+ If you say Y here, processes inside a chroot will not be able to raise
56345+ the priority of processes in the chroot, or alter the priority of
56346+ processes outside the chroot. This provides more security than simply
56347+ removing CAP_SYS_NICE from the process' capability set. If the
56348+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56349+ is created.
56350+
56351+config GRKERNSEC_CHROOT_SYSCTL
56352+ bool "Deny sysctl writes"
56353+ depends on GRKERNSEC_CHROOT
56354+ help
56355+ If you say Y here, an attacker in a chroot will not be able to
56356+ write to sysctl entries, either by sysctl(2) or through a /proc
56357+ interface. It is strongly recommended that you say Y here. If the
56358+ sysctl option is enabled, a sysctl option with name
56359+ "chroot_deny_sysctl" is created.
56360+
56361+config GRKERNSEC_CHROOT_CAPS
56362+ bool "Capability restrictions"
56363+ depends on GRKERNSEC_CHROOT
56364+ help
56365+ If you say Y here, the capabilities on all root processes within a
56366+ chroot jail will be lowered to stop module insertion, raw i/o,
56367+ system and net admin tasks, rebooting the system, modifying immutable
56368+ files, modifying IPC owned by another, and changing the system time.
56369+ This is left an option because it can break some apps. Disable this
56370+ if your chrooted apps are having problems performing those kinds of
56371+ tasks. If the sysctl option is enabled, a sysctl option with
56372+ name "chroot_caps" is created.
56373+
56374+endmenu
56375+menu "Kernel Auditing"
56376+depends on GRKERNSEC
56377+
56378+config GRKERNSEC_AUDIT_GROUP
56379+ bool "Single group for auditing"
56380+ help
56381+ If you say Y here, the exec, chdir, and (un)mount logging features
56382+ will only operate on a group you specify. This option is recommended
56383+ if you only want to watch certain users instead of having a large
56384+ amount of logs from the entire system. If the sysctl option is enabled,
56385+ a sysctl option with name "audit_group" is created.
56386+
56387+config GRKERNSEC_AUDIT_GID
56388+ int "GID for auditing"
56389+ depends on GRKERNSEC_AUDIT_GROUP
56390+ default 1007
56391+
56392+config GRKERNSEC_EXECLOG
56393+ bool "Exec logging"
56394+ help
56395+ If you say Y here, all execve() calls will be logged (since the
56396+ other exec*() calls are frontends to execve(), all execution
56397+ will be logged). Useful for shell-servers that like to keep track
56398+ of their users. If the sysctl option is enabled, a sysctl option with
56399+ name "exec_logging" is created.
56400+ WARNING: This option when enabled will produce a LOT of logs, especially
56401+ on an active system.
56402+
56403+config GRKERNSEC_RESLOG
56404+ bool "Resource logging"
56405+ help
56406+ If you say Y here, all attempts to overstep resource limits will
56407+ be logged with the resource name, the requested size, and the current
56408+ limit. It is highly recommended that you say Y here. If the sysctl
56409+ option is enabled, a sysctl option with name "resource_logging" is
56410+ created. If the RBAC system is enabled, the sysctl value is ignored.
56411+
56412+config GRKERNSEC_CHROOT_EXECLOG
56413+ bool "Log execs within chroot"
56414+ help
56415+ If you say Y here, all executions inside a chroot jail will be logged
56416+ to syslog. This can cause a large amount of logs if certain
56417+ applications (e.g. djb's daemontools) are installed on the system, and
56418+ is therefore left as an option. If the sysctl option is enabled, a
56419+ sysctl option with name "chroot_execlog" is created.
56420+
56421+config GRKERNSEC_AUDIT_PTRACE
56422+ bool "Ptrace logging"
56423+ help
56424+ If you say Y here, all attempts to attach to a process via ptrace
56425+ will be logged. If the sysctl option is enabled, a sysctl option
56426+ with name "audit_ptrace" is created.
56427+
56428+config GRKERNSEC_AUDIT_CHDIR
56429+ bool "Chdir logging"
56430+ help
56431+ If you say Y here, all chdir() calls will be logged. If the sysctl
56432+ option is enabled, a sysctl option with name "audit_chdir" is created.
56433+
56434+config GRKERNSEC_AUDIT_MOUNT
56435+ bool "(Un)Mount logging"
56436+ help
56437+ If you say Y here, all mounts and unmounts will be logged. If the
56438+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56439+ created.
56440+
56441+config GRKERNSEC_SIGNAL
56442+ bool "Signal logging"
56443+ help
56444+ If you say Y here, certain important signals will be logged, such as
56445+ SIGSEGV, which will as a result inform you when an error in a program
56446+ occurred, which in some cases could mean a possible exploit attempt.
56447+ If the sysctl option is enabled, a sysctl option with name
56448+ "signal_logging" is created.
56449+
56450+config GRKERNSEC_FORKFAIL
56451+ bool "Fork failure logging"
56452+ help
56453+ If you say Y here, all failed fork() attempts will be logged.
56454+ This could suggest a fork bomb, or someone attempting to overstep
56455+ their process limit. If the sysctl option is enabled, a sysctl option
56456+ with name "forkfail_logging" is created.
56457+
56458+config GRKERNSEC_TIME
56459+ bool "Time change logging"
56460+ help
56461+ If you say Y here, any changes of the system clock will be logged.
56462+ If the sysctl option is enabled, a sysctl option with name
56463+ "timechange_logging" is created.
56464+
56465+config GRKERNSEC_PROC_IPADDR
56466+ bool "/proc/<pid>/ipaddr support"
56467+ help
56468+ If you say Y here, a new entry will be added to each /proc/<pid>
56469+ directory that contains the IP address of the person using the task.
56470+ The IP is carried across local TCP and AF_UNIX stream sockets.
56471+ This information can be useful for IDS/IPSes to perform remote response
56472+ to a local attack. The entry is readable by only the owner of the
56473+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56474+ the RBAC system), and thus does not create privacy concerns.
56475+
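
The entry described above can be read like any other per-process /proc file. A usage sketch follows; the exact output format of the ipaddr file is not shown in this patch, so the line is printed verbatim:

#include <stdio.h>

/* print the recorded peer IP for a task from its /proc/<pid>/ipaddr entry */
int main(int argc, char **argv)
{
	char path[64], buf[64];
	FILE *f;

	if (argc < 2)
		return 1;
	snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
	f = fopen(path, "r");		/* readable only by the task's owner */
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}
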
56476+config GRKERNSEC_RWXMAP_LOG
56477+ bool 'Denied RWX mmap/mprotect logging'
56478+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56479+ help
56480+ If you say Y here, calls to mmap() and mprotect() with explicit
56481+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56482+ denied by the PAX_MPROTECT feature. If the sysctl option is
56483+ enabled, a sysctl option with name "rwxmap_logging" is created.
56484+
56485+config GRKERNSEC_AUDIT_TEXTREL
56486+ bool 'ELF text relocations logging (READ HELP)'
56487+ depends on PAX_MPROTECT
56488+ help
56489+ If you say Y here, text relocations will be logged with the filename
56490+ of the offending library or binary. The purpose of the feature is
56491+ to help Linux distribution developers get rid of libraries and
56492+ binaries that need text relocations which hinder the future progress
56493+ of PaX. Only Linux distribution developers should say Y here, and
56494+ never on a production machine, as this option creates an information
56495+ leak that could aid an attacker in defeating the randomization of
56496+ a single memory region. If the sysctl option is enabled, a sysctl
56497+ option with name "audit_textrel" is created.
56498+
56499+endmenu
56500+
56501+menu "Executable Protections"
56502+depends on GRKERNSEC
56503+
56504+config GRKERNSEC_DMESG
56505+ bool "Dmesg(8) restriction"
56506+ help
56507+ If you say Y here, non-root users will not be able to use dmesg(8)
56508+ to view up to the last 4kb of messages in the kernel's log buffer.
56509+ The kernel's log buffer often contains kernel addresses and other
56510+ identifying information useful to an attacker in fingerprinting a
56511+ system for a targeted exploit.
56512+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56513+ created.
56514+
56515+config GRKERNSEC_HARDEN_PTRACE
56516+ bool "Deter ptrace-based process snooping"
56517+ help
56518+ If you say Y here, TTY sniffers and other malicious monitoring
56519+ programs implemented through ptrace will be defeated. If you
56520+ have been using the RBAC system, this option has already been
56521+ enabled for several years for all users, with the ability to make
56522+ fine-grained exceptions.
56523+
56524+ This option only affects the ability of non-root users to ptrace
56525+ processes that are not a descendent of the ptracing process.
56526+ This means that strace ./binary and gdb ./binary will still work,
56527+ but attaching to arbitrary processes will not. If the sysctl
56528+ option is enabled, a sysctl option with name "harden_ptrace" is
56529+ created.
56530+
56531+config GRKERNSEC_TPE
56532+ bool "Trusted Path Execution (TPE)"
56533+ help
56534+ If you say Y here, you will be able to choose a gid to add to the
56535+ supplementary groups of users you want to mark as "untrusted."
56536+ These users will not be able to execute any files that are not in
56537+ root-owned directories writable only by root. If the sysctl option
56538+ is enabled, a sysctl option with name "tpe" is created.
56539+
56540+config GRKERNSEC_TPE_ALL
56541+ bool "Partially restrict all non-root users"
56542+ depends on GRKERNSEC_TPE
56543+ help
56544+ If you say Y here, all non-root users will be covered under
56545+ a weaker TPE restriction. This is separate from, and in addition to,
56546+ the main TPE options that you have selected elsewhere. Thus, if a
56547+	  "trusted" GID is chosen, this restriction applies even to that GID.
56548+ Under this restriction, all non-root users will only be allowed to
56549+ execute files in directories they own that are not group or
56550+ world-writable, or in directories owned by root and writable only by
56551+ root. If the sysctl option is enabled, a sysctl option with name
56552+ "tpe_restrict_all" is created.
56553+
56554+config GRKERNSEC_TPE_INVERT
56555+ bool "Invert GID option"
56556+ depends on GRKERNSEC_TPE
56557+ help
56558+ If you say Y here, the group you specify in the TPE configuration will
56559+ decide what group TPE restrictions will be *disabled* for. This
56560+ option is useful if you want TPE restrictions to be applied to most
56561+ users on the system. If the sysctl option is enabled, a sysctl option
56562+ with name "tpe_invert" is created. Unlike other sysctl options, this
56563+ entry will default to on for backward-compatibility.
56564+
56565+config GRKERNSEC_TPE_GID
56566+ int "GID for untrusted users"
56567+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56568+ default 1005
56569+ help
56570+ Setting this GID determines what group TPE restrictions will be
56571+ *enabled* for. If the sysctl option is enabled, a sysctl option
56572+ with name "tpe_gid" is created.
56573+
56574+config GRKERNSEC_TPE_GID
56575+ int "GID for trusted users"
56576+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56577+ default 1005
56578+ help
56579+ Setting this GID determines what group TPE restrictions will be
56580+ *disabled* for. If the sysctl option is enabled, a sysctl option
56581+ with name "tpe_gid" is created.
56582+
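The directory test at the heart of TPE can be stated in a few lines of C. The helper below is an illustrative sketch only, not code added by this patch; tpe_dir_is_trusted() is a hypothetical name, and the real checks also have to honour the invert and GID options described above.

    #include <linux/fs.h>
    #include <linux/stat.h>

    /* Sketch: an untrusted user may execute a file only if its directory
     * is owned by root and is neither group- nor world-writable. */
    static int tpe_dir_is_trusted(const struct inode *dir)
    {
            return dir->i_uid == 0 && !(dir->i_mode & (S_IWGRP | S_IWOTH));
    }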
56583+endmenu
56584+menu "Network Protections"
56585+depends on GRKERNSEC
56586+
56587+config GRKERNSEC_RANDNET
56588+ bool "Larger entropy pools"
56589+ help
56590+ If you say Y here, the entropy pools used for many features of Linux
56591+ and grsecurity will be doubled in size. Since several grsecurity
56592+ features use additional randomness, it is recommended that you say Y
56593+ here. Saying Y here has a similar effect as modifying
56594+ /proc/sys/kernel/random/poolsize.
56595+
56596+config GRKERNSEC_BLACKHOLE
56597+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56598+ depends on NET
56599+ help
56600+ If you say Y here, neither TCP resets nor ICMP
56601+ destination-unreachable packets will be sent in response to packets
56602+ sent to ports for which no associated listening process exists.
56603+	  This feature supports both IPv4 and IPv6 and exempts the
56604+ loopback interface from blackholing. Enabling this feature
56605+ makes a host more resilient to DoS attacks and reduces network
56606+ visibility against scanners.
56607+
56608+ The blackhole feature as-implemented is equivalent to the FreeBSD
56609+ blackhole feature, as it prevents RST responses to all packets, not
56610+ just SYNs. Under most application behavior this causes no
56611+ problems, but applications (like haproxy) may not close certain
56612+ connections in a way that cleanly terminates them on the remote
56613+ end, leaving the remote host in LAST_ACK state. Because of this
56614+ side-effect and to prevent intentional LAST_ACK DoSes, this
56615+ feature also adds automatic mitigation against such attacks.
56616+ The mitigation drastically reduces the amount of time a socket
56617+ can spend in LAST_ACK state. If you're using haproxy and not
56618+ all servers it connects to have this option enabled, consider
56619+ disabling this feature on the haproxy host.
56620+
56621+ If the sysctl option is enabled, two sysctl options with names
56622+ "ip_blackhole" and "lastack_retries" will be created.
56623+ While "ip_blackhole" takes the standard zero/non-zero on/off
56624+ toggle, "lastack_retries" uses the same kinds of values as
56625+ "tcp_retries1" and "tcp_retries2". The default value of 4
56626+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56627+ state.
56628+
56629+config GRKERNSEC_SOCKET
56630+ bool "Socket restrictions"
56631+ depends on NET
56632+ help
56633+ If you say Y here, you will be able to choose from several options.
56634+ If you assign a GID on your system and add it to the supplementary
56635+ groups of users you want to restrict socket access to, this patch
56636+	  will enforce up to three restrictions, based on the option(s) you choose.
56637+
56638+config GRKERNSEC_SOCKET_ALL
56639+ bool "Deny any sockets to group"
56640+ depends on GRKERNSEC_SOCKET
56641+ help
56642+	  If you say Y here, you will be able to choose a GID whose users will
56643+ be unable to connect to other hosts from your machine or run server
56644+ applications from your machine. If the sysctl option is enabled, a
56645+ sysctl option with name "socket_all" is created.
56646+
56647+config GRKERNSEC_SOCKET_ALL_GID
56648+ int "GID to deny all sockets for"
56649+ depends on GRKERNSEC_SOCKET_ALL
56650+ default 1004
56651+ help
56652+ Here you can choose the GID to disable socket access for. Remember to
56653+ add the users you want socket access disabled for to the GID
56654+ specified here. If the sysctl option is enabled, a sysctl option
56655+ with name "socket_all_gid" is created.
56656+
56657+config GRKERNSEC_SOCKET_CLIENT
56658+ bool "Deny client sockets to group"
56659+ depends on GRKERNSEC_SOCKET
56660+ help
56661+	  If you say Y here, you will be able to choose a GID whose users will
56662+ be unable to connect to other hosts from your machine, but will be
56663+ able to run servers. If this option is enabled, all users in the group
56664+ you specify will have to use passive mode when initiating ftp transfers
56665+ from the shell on your machine. If the sysctl option is enabled, a
56666+ sysctl option with name "socket_client" is created.
56667+
56668+config GRKERNSEC_SOCKET_CLIENT_GID
56669+ int "GID to deny client sockets for"
56670+ depends on GRKERNSEC_SOCKET_CLIENT
56671+ default 1003
56672+ help
56673+ Here you can choose the GID to disable client socket access for.
56674+ Remember to add the users you want client socket access disabled for to
56675+ the GID specified here. If the sysctl option is enabled, a sysctl
56676+ option with name "socket_client_gid" is created.
56677+
56678+config GRKERNSEC_SOCKET_SERVER
56679+ bool "Deny server sockets to group"
56680+ depends on GRKERNSEC_SOCKET
56681+ help
56682+	  If you say Y here, you will be able to choose a GID whose users will
56683+ be unable to run server applications from your machine. If the sysctl
56684+ option is enabled, a sysctl option with name "socket_server" is created.
56685+
56686+config GRKERNSEC_SOCKET_SERVER_GID
56687+ int "GID to deny server sockets for"
56688+ depends on GRKERNSEC_SOCKET_SERVER
56689+ default 1002
56690+ help
56691+ Here you can choose the GID to disable server socket access for.
56692+ Remember to add the users you want server socket access disabled for to
56693+ the GID specified here. If the sysctl option is enabled, a sysctl
56694+ option with name "socket_server_gid" is created.
56695+
56696+endmenu
56697+menu "Sysctl support"
56698+depends on GRKERNSEC && SYSCTL
56699+
56700+config GRKERNSEC_SYSCTL
56701+ bool "Sysctl support"
56702+ help
56703+ If you say Y here, you will be able to change the options that
56704+ grsecurity runs with at bootup, without having to recompile your
56705+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56706+ to enable (1) or disable (0) various features. All the sysctl entries
56707+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56708+ All features enabled in the kernel configuration are disabled at boot
56709+ if you do not say Y to the "Turn on features by default" option.
56710+ All options should be set at startup, and the grsec_lock entry should
56711+ be set to a non-zero value after all the options are set.
56712+ *THIS IS EXTREMELY IMPORTANT*
56713+
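As a hedged illustration of the "set everything, then lock" sequence the help text insists on, a boot-time tool could finish its configuration with a small program like the one below. The path follows from the /proc/sys/kernel/grsecurity tree and the grsec_lock entry named above; error handling is minimal.

    #include <stdio.h>

    int main(void)
    {
            /* Any non-zero value freezes further changes to the grsecurity
             * sysctl entries, as described in the help text above. */
            FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "w");

            if (!f) {
                    perror("grsec_lock");
                    return 1;
            }
            fputs("1\n", f);
            return fclose(f) ? 1 : 0;
    }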
56714+config GRKERNSEC_SYSCTL_DISTRO
56715+ bool "Extra sysctl support for distro makers (READ HELP)"
56716+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56717+ help
56718+ If you say Y here, additional sysctl options will be created
56719+ for features that affect processes running as root. Therefore,
56720+ it is critical when using this option that the grsec_lock entry be
56721+	  enabled after boot. Only distros that ship prebuilt kernel packages
56722+	  with this option enabled, and that can ensure grsec_lock is enabled
56723+	  after boot, should use this option.
56724+ *Failure to set grsec_lock after boot makes all grsec features
56725+ this option covers useless*
56726+
56727+ Currently this option creates the following sysctl entries:
56728+ "Disable Privileged I/O": "disable_priv_io"
56729+
56730+config GRKERNSEC_SYSCTL_ON
56731+ bool "Turn on features by default"
56732+ depends on GRKERNSEC_SYSCTL
56733+ help
56734+ If you say Y here, instead of having all features enabled in the
56735+ kernel configuration disabled at boot time, the features will be
56736+ enabled at boot time. It is recommended you say Y here unless
56737+ there is some reason you would want all sysctl-tunable features to
56738+ be disabled by default. As mentioned elsewhere, it is important
56739+ to enable the grsec_lock entry once you have finished modifying
56740+ the sysctl entries.
56741+
56742+endmenu
56743+menu "Logging Options"
56744+depends on GRKERNSEC
56745+
56746+config GRKERNSEC_FLOODTIME
56747+ int "Seconds in between log messages (minimum)"
56748+ default 10
56749+ help
56750+	  This option allows you to enforce a minimum number of seconds between
56751+	  grsecurity log messages. The default should be suitable for most
56752+	  people; however, if you choose to change it, choose a value small enough
56753+ to allow informative logs to be produced, but large enough to
56754+ prevent flooding.
56755+
56756+config GRKERNSEC_FLOODBURST
56757+ int "Number of messages in a burst (maximum)"
56758+ default 4
56759+ help
56760+ This option allows you to choose the maximum number of messages allowed
56761+ within the flood time interval you chose in a separate option. The
56762+	  default should be suitable for most people; however, if you find that
56763+ many of your logs are being interpreted as flooding, you may want to
56764+ raise this value.
56765+
56766+endmenu
56767+
56768+endmenu
56769diff -urNp linux-2.6.32.45/grsecurity/Makefile linux-2.6.32.45/grsecurity/Makefile
56770--- linux-2.6.32.45/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
56771+++ linux-2.6.32.45/grsecurity/Makefile 2011-08-21 18:54:34.000000000 -0400
56772@@ -0,0 +1,34 @@
56773+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56774+# during 2001-2009 it has been completely redesigned by Brad Spengler
56775+# into an RBAC system
56776+#
56777+# All code in this directory and various hooks inserted throughout the kernel
56778+# are copyright Brad Spengler - Open Source Security, Inc., and released
56779+# under the GPL v2 or higher
56780+
56781+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56782+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56783+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56784+
56785+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56786+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56787+ gracl_learn.o grsec_log.o
56788+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56789+
56790+ifdef CONFIG_NET
56791+obj-y += grsec_sock.o
56792+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56793+endif
56794+
56795+ifndef CONFIG_GRKERNSEC
56796+obj-y += grsec_disabled.o
56797+endif
56798+
56799+ifdef CONFIG_GRKERNSEC_HIDESYM
56800+extra-y := grsec_hidesym.o
56801+$(obj)/grsec_hidesym.o:
56802+ @-chmod -f 500 /boot
56803+ @-chmod -f 500 /lib/modules
56804+ @-chmod -f 700 .
56805+ @echo ' grsec: protected kernel image paths'
56806+endif
56807diff -urNp linux-2.6.32.45/include/acpi/acpi_bus.h linux-2.6.32.45/include/acpi/acpi_bus.h
56808--- linux-2.6.32.45/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
56809+++ linux-2.6.32.45/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
56810@@ -107,7 +107,7 @@ struct acpi_device_ops {
56811 acpi_op_bind bind;
56812 acpi_op_unbind unbind;
56813 acpi_op_notify notify;
56814-};
56815+} __no_const;
56816
56817 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
56818
56819diff -urNp linux-2.6.32.45/include/acpi/acpi_drivers.h linux-2.6.32.45/include/acpi/acpi_drivers.h
56820--- linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
56821+++ linux-2.6.32.45/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
56822@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
56823 Dock Station
56824 -------------------------------------------------------------------------- */
56825 struct acpi_dock_ops {
56826- acpi_notify_handler handler;
56827- acpi_notify_handler uevent;
56828+ const acpi_notify_handler handler;
56829+ const acpi_notify_handler uevent;
56830 };
56831
56832 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
56833@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
56834 extern int register_dock_notifier(struct notifier_block *nb);
56835 extern void unregister_dock_notifier(struct notifier_block *nb);
56836 extern int register_hotplug_dock_device(acpi_handle handle,
56837- struct acpi_dock_ops *ops,
56838+ const struct acpi_dock_ops *ops,
56839 void *context);
56840 extern void unregister_hotplug_dock_device(acpi_handle handle);
56841 #else
56842@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
56843 {
56844 }
56845 static inline int register_hotplug_dock_device(acpi_handle handle,
56846- struct acpi_dock_ops *ops,
56847+ const struct acpi_dock_ops *ops,
56848 void *context)
56849 {
56850 return -ENODEV;
56851diff -urNp linux-2.6.32.45/include/asm-generic/atomic-long.h linux-2.6.32.45/include/asm-generic/atomic-long.h
56852--- linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
56853+++ linux-2.6.32.45/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
56854@@ -22,6 +22,12 @@
56855
56856 typedef atomic64_t atomic_long_t;
56857
56858+#ifdef CONFIG_PAX_REFCOUNT
56859+typedef atomic64_unchecked_t atomic_long_unchecked_t;
56860+#else
56861+typedef atomic64_t atomic_long_unchecked_t;
56862+#endif
56863+
56864 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
56865
56866 static inline long atomic_long_read(atomic_long_t *l)
56867@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
56868 return (long)atomic64_read(v);
56869 }
56870
56871+#ifdef CONFIG_PAX_REFCOUNT
56872+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56873+{
56874+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56875+
56876+ return (long)atomic64_read_unchecked(v);
56877+}
56878+#endif
56879+
56880 static inline void atomic_long_set(atomic_long_t *l, long i)
56881 {
56882 atomic64_t *v = (atomic64_t *)l;
56883@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
56884 atomic64_set(v, i);
56885 }
56886
56887+#ifdef CONFIG_PAX_REFCOUNT
56888+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56889+{
56890+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56891+
56892+ atomic64_set_unchecked(v, i);
56893+}
56894+#endif
56895+
56896 static inline void atomic_long_inc(atomic_long_t *l)
56897 {
56898 atomic64_t *v = (atomic64_t *)l;
56899@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
56900 atomic64_inc(v);
56901 }
56902
56903+#ifdef CONFIG_PAX_REFCOUNT
56904+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
56905+{
56906+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56907+
56908+ atomic64_inc_unchecked(v);
56909+}
56910+#endif
56911+
56912 static inline void atomic_long_dec(atomic_long_t *l)
56913 {
56914 atomic64_t *v = (atomic64_t *)l;
56915@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
56916 atomic64_dec(v);
56917 }
56918
56919+#ifdef CONFIG_PAX_REFCOUNT
56920+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
56921+{
56922+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56923+
56924+ atomic64_dec_unchecked(v);
56925+}
56926+#endif
56927+
56928 static inline void atomic_long_add(long i, atomic_long_t *l)
56929 {
56930 atomic64_t *v = (atomic64_t *)l;
56931@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
56932 atomic64_add(i, v);
56933 }
56934
56935+#ifdef CONFIG_PAX_REFCOUNT
56936+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
56937+{
56938+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56939+
56940+ atomic64_add_unchecked(i, v);
56941+}
56942+#endif
56943+
56944 static inline void atomic_long_sub(long i, atomic_long_t *l)
56945 {
56946 atomic64_t *v = (atomic64_t *)l;
56947@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
56948 return (long)atomic64_inc_return(v);
56949 }
56950
56951+#ifdef CONFIG_PAX_REFCOUNT
56952+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
56953+{
56954+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
56955+
56956+ return (long)atomic64_inc_return_unchecked(v);
56957+}
56958+#endif
56959+
56960 static inline long atomic_long_dec_return(atomic_long_t *l)
56961 {
56962 atomic64_t *v = (atomic64_t *)l;
56963@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
56964
56965 typedef atomic_t atomic_long_t;
56966
56967+#ifdef CONFIG_PAX_REFCOUNT
56968+typedef atomic_unchecked_t atomic_long_unchecked_t;
56969+#else
56970+typedef atomic_t atomic_long_unchecked_t;
56971+#endif
56972+
56973 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
56974 static inline long atomic_long_read(atomic_long_t *l)
56975 {
56976@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
56977 return (long)atomic_read(v);
56978 }
56979
56980+#ifdef CONFIG_PAX_REFCOUNT
56981+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
56982+{
56983+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
56984+
56985+ return (long)atomic_read_unchecked(v);
56986+}
56987+#endif
56988+
56989 static inline void atomic_long_set(atomic_long_t *l, long i)
56990 {
56991 atomic_t *v = (atomic_t *)l;
56992@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
56993 atomic_set(v, i);
56994 }
56995
56996+#ifdef CONFIG_PAX_REFCOUNT
56997+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
56998+{
56999+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57000+
57001+ atomic_set_unchecked(v, i);
57002+}
57003+#endif
57004+
57005 static inline void atomic_long_inc(atomic_long_t *l)
57006 {
57007 atomic_t *v = (atomic_t *)l;
57008@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
57009 atomic_inc(v);
57010 }
57011
57012+#ifdef CONFIG_PAX_REFCOUNT
57013+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57014+{
57015+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57016+
57017+ atomic_inc_unchecked(v);
57018+}
57019+#endif
57020+
57021 static inline void atomic_long_dec(atomic_long_t *l)
57022 {
57023 atomic_t *v = (atomic_t *)l;
57024@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
57025 atomic_dec(v);
57026 }
57027
57028+#ifdef CONFIG_PAX_REFCOUNT
57029+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57030+{
57031+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57032+
57033+ atomic_dec_unchecked(v);
57034+}
57035+#endif
57036+
57037 static inline void atomic_long_add(long i, atomic_long_t *l)
57038 {
57039 atomic_t *v = (atomic_t *)l;
57040@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
57041 atomic_add(i, v);
57042 }
57043
57044+#ifdef CONFIG_PAX_REFCOUNT
57045+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57046+{
57047+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57048+
57049+ atomic_add_unchecked(i, v);
57050+}
57051+#endif
57052+
57053 static inline void atomic_long_sub(long i, atomic_long_t *l)
57054 {
57055 atomic_t *v = (atomic_t *)l;
57056@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
57057 return (long)atomic_inc_return(v);
57058 }
57059
57060+#ifdef CONFIG_PAX_REFCOUNT
57061+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57062+{
57063+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57064+
57065+ return (long)atomic_inc_return_unchecked(v);
57066+}
57067+#endif
57068+
57069 static inline long atomic_long_dec_return(atomic_long_t *l)
57070 {
57071 atomic_t *v = (atomic_t *)l;
57072@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
57073
57074 #endif /* BITS_PER_LONG == 64 */
57075
57076+#ifdef CONFIG_PAX_REFCOUNT
57077+static inline void pax_refcount_needs_these_functions(void)
57078+{
57079+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57080+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57081+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57082+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57083+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57084+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57085+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57086+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57087+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57088+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57089+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57090+
57091+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57092+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57093+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57094+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57095+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57096+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57097+}
57098+#else
57099+#define atomic_read_unchecked(v) atomic_read(v)
57100+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57101+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57102+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57103+#define atomic_inc_unchecked(v) atomic_inc(v)
57104+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57105+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57106+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57107+#define atomic_dec_unchecked(v) atomic_dec(v)
57108+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57109+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57110+
57111+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57112+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57113+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57114+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57115+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57116+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57117+#endif
57118+
57119 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
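The hunk above adds _unchecked counterparts that bypass PAX_REFCOUNT's overflow detection. Below is a minimal sketch of the intended split, assuming a driver-private structure; example_obj and example_account() are hypothetical names, not part of the patch.

    #include <asm/atomic.h>

    /* Reference counts keep overflow detection; statistics counters that may
     * legitimately wrap use the _unchecked variants so PAX_REFCOUNT does not
     * treat a wrap as an attack. */
    struct example_obj {
            atomic_long_t           refcnt;    /* overflow here is a bug    */
            atomic_long_unchecked_t rx_bytes;  /* wrapping here is harmless */
    };

    static void example_account(struct example_obj *obj, long bytes)
    {
            atomic_long_inc(&obj->refcnt);
            atomic_long_add_unchecked(bytes, &obj->rx_bytes);
    }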
57120diff -urNp linux-2.6.32.45/include/asm-generic/bug.h linux-2.6.32.45/include/asm-generic/bug.h
57121--- linux-2.6.32.45/include/asm-generic/bug.h 2011-07-13 17:23:04.000000000 -0400
57122+++ linux-2.6.32.45/include/asm-generic/bug.h 2011-08-21 17:56:07.000000000 -0400
57123@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const cha
57124
57125 #else /* !CONFIG_BUG */
57126 #ifndef HAVE_ARCH_BUG
57127-#define BUG() do {} while(0)
57128+#define BUG() do { for (;;) ; } while(0)
57129 #endif
57130
57131 #ifndef HAVE_ARCH_BUG_ON
57132-#define BUG_ON(condition) do { if (condition) ; } while(0)
57133+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
57134 #endif
57135
57136 #ifndef HAVE_ARCH_WARN_ON
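The change above matters because callers treat BUG() as a terminal statement. A minimal sketch of the pattern the stock !CONFIG_BUG stub would break (struct foo and example_use() are hypothetical):

    #include <linux/kernel.h>

    struct foo { int value; };

    static int example_use(struct foo *f)
    {
            if (!f)
                    BUG();  /* with the looping stub this cannot fall through */
            return f->value;
    }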
57137diff -urNp linux-2.6.32.45/include/asm-generic/cache.h linux-2.6.32.45/include/asm-generic/cache.h
57138--- linux-2.6.32.45/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
57139+++ linux-2.6.32.45/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
57140@@ -6,7 +6,7 @@
57141 * cache lines need to provide their own cache.h.
57142 */
57143
57144-#define L1_CACHE_SHIFT 5
57145-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57146+#define L1_CACHE_SHIFT 5UL
57147+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57148
57149 #endif /* __ASM_GENERIC_CACHE_H */
57150diff -urNp linux-2.6.32.45/include/asm-generic/dma-mapping-common.h linux-2.6.32.45/include/asm-generic/dma-mapping-common.h
57151--- linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
57152+++ linux-2.6.32.45/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
57153@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
57154 enum dma_data_direction dir,
57155 struct dma_attrs *attrs)
57156 {
57157- struct dma_map_ops *ops = get_dma_ops(dev);
57158+ const struct dma_map_ops *ops = get_dma_ops(dev);
57159 dma_addr_t addr;
57160
57161 kmemcheck_mark_initialized(ptr, size);
57162@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
57163 enum dma_data_direction dir,
57164 struct dma_attrs *attrs)
57165 {
57166- struct dma_map_ops *ops = get_dma_ops(dev);
57167+ const struct dma_map_ops *ops = get_dma_ops(dev);
57168
57169 BUG_ON(!valid_dma_direction(dir));
57170 if (ops->unmap_page)
57171@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
57172 int nents, enum dma_data_direction dir,
57173 struct dma_attrs *attrs)
57174 {
57175- struct dma_map_ops *ops = get_dma_ops(dev);
57176+ const struct dma_map_ops *ops = get_dma_ops(dev);
57177 int i, ents;
57178 struct scatterlist *s;
57179
57180@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
57181 int nents, enum dma_data_direction dir,
57182 struct dma_attrs *attrs)
57183 {
57184- struct dma_map_ops *ops = get_dma_ops(dev);
57185+ const struct dma_map_ops *ops = get_dma_ops(dev);
57186
57187 BUG_ON(!valid_dma_direction(dir));
57188 debug_dma_unmap_sg(dev, sg, nents, dir);
57189@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
57190 size_t offset, size_t size,
57191 enum dma_data_direction dir)
57192 {
57193- struct dma_map_ops *ops = get_dma_ops(dev);
57194+ const struct dma_map_ops *ops = get_dma_ops(dev);
57195 dma_addr_t addr;
57196
57197 kmemcheck_mark_initialized(page_address(page) + offset, size);
57198@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
57199 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
57200 size_t size, enum dma_data_direction dir)
57201 {
57202- struct dma_map_ops *ops = get_dma_ops(dev);
57203+ const struct dma_map_ops *ops = get_dma_ops(dev);
57204
57205 BUG_ON(!valid_dma_direction(dir));
57206 if (ops->unmap_page)
57207@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
57208 size_t size,
57209 enum dma_data_direction dir)
57210 {
57211- struct dma_map_ops *ops = get_dma_ops(dev);
57212+ const struct dma_map_ops *ops = get_dma_ops(dev);
57213
57214 BUG_ON(!valid_dma_direction(dir));
57215 if (ops->sync_single_for_cpu)
57216@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57217 dma_addr_t addr, size_t size,
57218 enum dma_data_direction dir)
57219 {
57220- struct dma_map_ops *ops = get_dma_ops(dev);
57221+ const struct dma_map_ops *ops = get_dma_ops(dev);
57222
57223 BUG_ON(!valid_dma_direction(dir));
57224 if (ops->sync_single_for_device)
57225@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57226 size_t size,
57227 enum dma_data_direction dir)
57228 {
57229- struct dma_map_ops *ops = get_dma_ops(dev);
57230+ const struct dma_map_ops *ops = get_dma_ops(dev);
57231
57232 BUG_ON(!valid_dma_direction(dir));
57233 if (ops->sync_single_range_for_cpu) {
57234@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57235 size_t size,
57236 enum dma_data_direction dir)
57237 {
57238- struct dma_map_ops *ops = get_dma_ops(dev);
57239+ const struct dma_map_ops *ops = get_dma_ops(dev);
57240
57241 BUG_ON(!valid_dma_direction(dir));
57242 if (ops->sync_single_range_for_device) {
57243@@ -155,7 +155,7 @@ static inline void
57244 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57245 int nelems, enum dma_data_direction dir)
57246 {
57247- struct dma_map_ops *ops = get_dma_ops(dev);
57248+ const struct dma_map_ops *ops = get_dma_ops(dev);
57249
57250 BUG_ON(!valid_dma_direction(dir));
57251 if (ops->sync_sg_for_cpu)
57252@@ -167,7 +167,7 @@ static inline void
57253 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57254 int nelems, enum dma_data_direction dir)
57255 {
57256- struct dma_map_ops *ops = get_dma_ops(dev);
57257+ const struct dma_map_ops *ops = get_dma_ops(dev);
57258
57259 BUG_ON(!valid_dma_direction(dir));
57260 if (ops->sync_sg_for_device)
57261diff -urNp linux-2.6.32.45/include/asm-generic/emergency-restart.h linux-2.6.32.45/include/asm-generic/emergency-restart.h
57262--- linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
57263+++ linux-2.6.32.45/include/asm-generic/emergency-restart.h 2011-08-21 19:17:17.000000000 -0400
57264@@ -1,7 +1,7 @@
57265 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
57266 #define _ASM_GENERIC_EMERGENCY_RESTART_H
57267
57268-static inline void machine_emergency_restart(void)
57269+static inline __noreturn void machine_emergency_restart(void)
57270 {
57271 machine_restart(NULL);
57272 }
57273diff -urNp linux-2.6.32.45/include/asm-generic/futex.h linux-2.6.32.45/include/asm-generic/futex.h
57274--- linux-2.6.32.45/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57275+++ linux-2.6.32.45/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57276@@ -6,7 +6,7 @@
57277 #include <asm/errno.h>
57278
57279 static inline int
57280-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57281+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57282 {
57283 int op = (encoded_op >> 28) & 7;
57284 int cmp = (encoded_op >> 24) & 15;
57285@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57286 }
57287
57288 static inline int
57289-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57290+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57291 {
57292 return -ENOSYS;
57293 }
57294diff -urNp linux-2.6.32.45/include/asm-generic/int-l64.h linux-2.6.32.45/include/asm-generic/int-l64.h
57295--- linux-2.6.32.45/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57296+++ linux-2.6.32.45/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57297@@ -46,6 +46,8 @@ typedef unsigned int u32;
57298 typedef signed long s64;
57299 typedef unsigned long u64;
57300
57301+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57302+
57303 #define S8_C(x) x
57304 #define U8_C(x) x ## U
57305 #define S16_C(x) x
57306diff -urNp linux-2.6.32.45/include/asm-generic/int-ll64.h linux-2.6.32.45/include/asm-generic/int-ll64.h
57307--- linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57308+++ linux-2.6.32.45/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57309@@ -51,6 +51,8 @@ typedef unsigned int u32;
57310 typedef signed long long s64;
57311 typedef unsigned long long u64;
57312
57313+typedef unsigned long long intoverflow_t;
57314+
57315 #define S8_C(x) x
57316 #define U8_C(x) x ## U
57317 #define S16_C(x) x
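Both hunks introduce intoverflow_t, a type intended to be wide enough to hold the product of two allocation sizes. A hedged sketch of the kind of size check it enables follows; kcalloc_checked() is a hypothetical helper, and the check only catches a wrap on configurations where intoverflow_t is wider than unsigned long.

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static inline void *kcalloc_checked(size_t n, size_t size, gfp_t flags)
    {
            /* The multiplication is done in the wider intoverflow_t; where
             * that type is wider than unsigned long, a product that would
             * wrap the allocation size is detected here. */
            if ((intoverflow_t)n * size > ULONG_MAX)
                    return NULL;
            return kmalloc(n * size, flags);
    }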
57318diff -urNp linux-2.6.32.45/include/asm-generic/kmap_types.h linux-2.6.32.45/include/asm-generic/kmap_types.h
57319--- linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57320+++ linux-2.6.32.45/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57321@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57322 KMAP_D(16) KM_IRQ_PTE,
57323 KMAP_D(17) KM_NMI,
57324 KMAP_D(18) KM_NMI_PTE,
57325-KMAP_D(19) KM_TYPE_NR
57326+KMAP_D(19) KM_CLEARPAGE,
57327+KMAP_D(20) KM_TYPE_NR
57328 };
57329
57330 #undef KMAP_D
57331diff -urNp linux-2.6.32.45/include/asm-generic/pgtable.h linux-2.6.32.45/include/asm-generic/pgtable.h
57332--- linux-2.6.32.45/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57333+++ linux-2.6.32.45/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57334@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57335 unsigned long size);
57336 #endif
57337
57338+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57339+static inline unsigned long pax_open_kernel(void) { return 0; }
57340+#endif
57341+
57342+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57343+static inline unsigned long pax_close_kernel(void) { return 0; }
57344+#endif
57345+
57346 #endif /* !__ASSEMBLY__ */
57347
57348 #endif /* _ASM_GENERIC_PGTABLE_H */
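The weak stubs above let architecture-independent code bracket writes to data that KERNEXEC/constification keeps read-only; where an architecture supplies no implementation they compile to nothing. A minimal usage sketch, with example_patch_entry() as a hypothetical caller:

    #include <asm/pgtable.h>

    static void example_patch_entry(unsigned long *slot, unsigned long val)
    {
            pax_open_kernel();      /* temporarily permit the write        */
            *slot = val;
            pax_close_kernel();     /* restore the read-only protection    */
    }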
57349diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h
57350--- linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57351+++ linux-2.6.32.45/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57352@@ -1,14 +1,19 @@
57353 #ifndef _PGTABLE_NOPMD_H
57354 #define _PGTABLE_NOPMD_H
57355
57356-#ifndef __ASSEMBLY__
57357-
57358 #include <asm-generic/pgtable-nopud.h>
57359
57360-struct mm_struct;
57361-
57362 #define __PAGETABLE_PMD_FOLDED
57363
57364+#define PMD_SHIFT PUD_SHIFT
57365+#define PTRS_PER_PMD 1
57366+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57367+#define PMD_MASK (~(PMD_SIZE-1))
57368+
57369+#ifndef __ASSEMBLY__
57370+
57371+struct mm_struct;
57372+
57373 /*
57374 * Having the pmd type consist of a pud gets the size right, and allows
57375 * us to conceptually access the pud entry that this pmd is folded into
57376@@ -16,11 +21,6 @@ struct mm_struct;
57377 */
57378 typedef struct { pud_t pud; } pmd_t;
57379
57380-#define PMD_SHIFT PUD_SHIFT
57381-#define PTRS_PER_PMD 1
57382-#define PMD_SIZE (1UL << PMD_SHIFT)
57383-#define PMD_MASK (~(PMD_SIZE-1))
57384-
57385 /*
57386 * The "pud_xxx()" functions here are trivial for a folded two-level
57387 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57388diff -urNp linux-2.6.32.45/include/asm-generic/pgtable-nopud.h linux-2.6.32.45/include/asm-generic/pgtable-nopud.h
57389--- linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57390+++ linux-2.6.32.45/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57391@@ -1,10 +1,15 @@
57392 #ifndef _PGTABLE_NOPUD_H
57393 #define _PGTABLE_NOPUD_H
57394
57395-#ifndef __ASSEMBLY__
57396-
57397 #define __PAGETABLE_PUD_FOLDED
57398
57399+#define PUD_SHIFT PGDIR_SHIFT
57400+#define PTRS_PER_PUD 1
57401+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57402+#define PUD_MASK (~(PUD_SIZE-1))
57403+
57404+#ifndef __ASSEMBLY__
57405+
57406 /*
57407 * Having the pud type consist of a pgd gets the size right, and allows
57408 * us to conceptually access the pgd entry that this pud is folded into
57409@@ -12,11 +17,6 @@
57410 */
57411 typedef struct { pgd_t pgd; } pud_t;
57412
57413-#define PUD_SHIFT PGDIR_SHIFT
57414-#define PTRS_PER_PUD 1
57415-#define PUD_SIZE (1UL << PUD_SHIFT)
57416-#define PUD_MASK (~(PUD_SIZE-1))
57417-
57418 /*
57419 * The "pgd_xxx()" functions here are trivial for a folded two-level
57420 * setup: the pud is never bad, and a pud always exists (as it's folded
57421diff -urNp linux-2.6.32.45/include/asm-generic/vmlinux.lds.h linux-2.6.32.45/include/asm-generic/vmlinux.lds.h
57422--- linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57423+++ linux-2.6.32.45/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57424@@ -199,6 +199,7 @@
57425 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57426 VMLINUX_SYMBOL(__start_rodata) = .; \
57427 *(.rodata) *(.rodata.*) \
57428+ *(.data.read_only) \
57429 *(__vermagic) /* Kernel version magic */ \
57430 *(__markers_strings) /* Markers: strings */ \
57431 *(__tracepoints_strings)/* Tracepoints: strings */ \
57432@@ -656,22 +657,24 @@
57433 * section in the linker script will go there too. @phdr should have
57434 * a leading colon.
57435 *
57436- * Note that this macros defines __per_cpu_load as an absolute symbol.
57437+ * Note that this macros defines per_cpu_load as an absolute symbol.
57438 * If there is no need to put the percpu section at a predetermined
57439 * address, use PERCPU().
57440 */
57441 #define PERCPU_VADDR(vaddr, phdr) \
57442- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57443- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57444+ per_cpu_load = .; \
57445+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57446 - LOAD_OFFSET) { \
57447+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57448 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57449 *(.data.percpu.first) \
57450- *(.data.percpu.page_aligned) \
57451 *(.data.percpu) \
57452+ . = ALIGN(PAGE_SIZE); \
57453+ *(.data.percpu.page_aligned) \
57454 *(.data.percpu.shared_aligned) \
57455 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57456 } phdr \
57457- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57458+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57459
57460 /**
57461 * PERCPU - define output section for percpu area, simple version
57462diff -urNp linux-2.6.32.45/include/drm/drm_crtc_helper.h linux-2.6.32.45/include/drm/drm_crtc_helper.h
57463--- linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57464+++ linux-2.6.32.45/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57465@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57466
57467 /* reload the current crtc LUT */
57468 void (*load_lut)(struct drm_crtc *crtc);
57469-};
57470+} __no_const;
57471
57472 struct drm_encoder_helper_funcs {
57473 void (*dpms)(struct drm_encoder *encoder, int mode);
57474@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57475 struct drm_connector *connector);
57476 /* disable encoder when not in use - more explicit than dpms off */
57477 void (*disable)(struct drm_encoder *encoder);
57478-};
57479+} __no_const;
57480
57481 struct drm_connector_helper_funcs {
57482 int (*get_modes)(struct drm_connector *connector);
57483diff -urNp linux-2.6.32.45/include/drm/drmP.h linux-2.6.32.45/include/drm/drmP.h
57484--- linux-2.6.32.45/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57485+++ linux-2.6.32.45/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57486@@ -71,6 +71,7 @@
57487 #include <linux/workqueue.h>
57488 #include <linux/poll.h>
57489 #include <asm/pgalloc.h>
57490+#include <asm/local.h>
57491 #include "drm.h"
57492
57493 #include <linux/idr.h>
57494@@ -814,7 +815,7 @@ struct drm_driver {
57495 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57496
57497 /* Driver private ops for this object */
57498- struct vm_operations_struct *gem_vm_ops;
57499+ const struct vm_operations_struct *gem_vm_ops;
57500
57501 int major;
57502 int minor;
57503@@ -917,7 +918,7 @@ struct drm_device {
57504
57505 /** \name Usage Counters */
57506 /*@{ */
57507- int open_count; /**< Outstanding files open */
57508+ local_t open_count; /**< Outstanding files open */
57509 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57510 atomic_t vma_count; /**< Outstanding vma areas open */
57511 int buf_use; /**< Buffers in use -- cannot alloc */
57512@@ -928,7 +929,7 @@ struct drm_device {
57513 /*@{ */
57514 unsigned long counters;
57515 enum drm_stat_type types[15];
57516- atomic_t counts[15];
57517+ atomic_unchecked_t counts[15];
57518 /*@} */
57519
57520 struct list_head filelist;
57521@@ -1016,7 +1017,7 @@ struct drm_device {
57522 struct pci_controller *hose;
57523 #endif
57524 struct drm_sg_mem *sg; /**< Scatter gather memory */
57525- unsigned int num_crtcs; /**< Number of CRTCs on this device */
57526+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
57527 void *dev_private; /**< device private data */
57528 void *mm_private;
57529 struct address_space *dev_mapping;
57530@@ -1042,11 +1043,11 @@ struct drm_device {
57531 spinlock_t object_name_lock;
57532 struct idr object_name_idr;
57533 atomic_t object_count;
57534- atomic_t object_memory;
57535+ atomic_unchecked_t object_memory;
57536 atomic_t pin_count;
57537- atomic_t pin_memory;
57538+ atomic_unchecked_t pin_memory;
57539 atomic_t gtt_count;
57540- atomic_t gtt_memory;
57541+ atomic_unchecked_t gtt_memory;
57542 uint32_t gtt_total;
57543 uint32_t invalidate_domains; /* domains pending invalidation */
57544 uint32_t flush_domains; /* domains pending flush */
57545diff -urNp linux-2.6.32.45/include/drm/ttm/ttm_memory.h linux-2.6.32.45/include/drm/ttm/ttm_memory.h
57546--- linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57547+++ linux-2.6.32.45/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57548@@ -47,7 +47,7 @@
57549
57550 struct ttm_mem_shrink {
57551 int (*do_shrink) (struct ttm_mem_shrink *);
57552-};
57553+} __no_const;
57554
57555 /**
57556 * struct ttm_mem_global - Global memory accounting structure.
57557diff -urNp linux-2.6.32.45/include/linux/a.out.h linux-2.6.32.45/include/linux/a.out.h
57558--- linux-2.6.32.45/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57559+++ linux-2.6.32.45/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57560@@ -39,6 +39,14 @@ enum machine_type {
57561 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57562 };
57563
57564+/* Constants for the N_FLAGS field */
57565+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57566+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57567+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57568+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57569+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57570+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57571+
57572 #if !defined (N_MAGIC)
57573 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57574 #endif
57575diff -urNp linux-2.6.32.45/include/linux/atmdev.h linux-2.6.32.45/include/linux/atmdev.h
57576--- linux-2.6.32.45/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57577+++ linux-2.6.32.45/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57578@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57579 #endif
57580
57581 struct k_atm_aal_stats {
57582-#define __HANDLE_ITEM(i) atomic_t i
57583+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57584 __AAL_STAT_ITEMS
57585 #undef __HANDLE_ITEM
57586 };
57587diff -urNp linux-2.6.32.45/include/linux/backlight.h linux-2.6.32.45/include/linux/backlight.h
57588--- linux-2.6.32.45/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57589+++ linux-2.6.32.45/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57590@@ -36,18 +36,18 @@ struct backlight_device;
57591 struct fb_info;
57592
57593 struct backlight_ops {
57594- unsigned int options;
57595+ const unsigned int options;
57596
57597 #define BL_CORE_SUSPENDRESUME (1 << 0)
57598
57599 /* Notify the backlight driver some property has changed */
57600- int (*update_status)(struct backlight_device *);
57601+ int (* const update_status)(struct backlight_device *);
57602 /* Return the current backlight brightness (accounting for power,
57603 fb_blank etc.) */
57604- int (*get_brightness)(struct backlight_device *);
57605+ int (* const get_brightness)(struct backlight_device *);
57606 /* Check if given framebuffer device is the one bound to this backlight;
57607 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57608- int (*check_fb)(struct fb_info *);
57609+ int (* const check_fb)(struct fb_info *);
57610 };
57611
57612 /* This structure defines all the properties of a backlight */
57613@@ -86,7 +86,7 @@ struct backlight_device {
57614 registered this device has been unloaded, and if class_get_devdata()
57615 points to something in the body of that driver, it is also invalid. */
57616 struct mutex ops_lock;
57617- struct backlight_ops *ops;
57618+ const struct backlight_ops *ops;
57619
57620 /* The framebuffer notifier block */
57621 struct notifier_block fb_notif;
57622@@ -103,7 +103,7 @@ static inline void backlight_update_stat
57623 }
57624
57625 extern struct backlight_device *backlight_device_register(const char *name,
57626- struct device *dev, void *devdata, struct backlight_ops *ops);
57627+ struct device *dev, void *devdata, const struct backlight_ops *ops);
57628 extern void backlight_device_unregister(struct backlight_device *bd);
57629 extern void backlight_force_update(struct backlight_device *bd,
57630 enum backlight_update_reason reason);
57631diff -urNp linux-2.6.32.45/include/linux/binfmts.h linux-2.6.32.45/include/linux/binfmts.h
57632--- linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57633+++ linux-2.6.32.45/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57634@@ -83,6 +83,7 @@ struct linux_binfmt {
57635 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57636 int (*load_shlib)(struct file *);
57637 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57638+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57639 unsigned long min_coredump; /* minimal dump size */
57640 int hasvdso;
57641 };
57642diff -urNp linux-2.6.32.45/include/linux/blkdev.h linux-2.6.32.45/include/linux/blkdev.h
57643--- linux-2.6.32.45/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57644+++ linux-2.6.32.45/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57645@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57646 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57647
57648 struct block_device_operations {
57649- int (*open) (struct block_device *, fmode_t);
57650- int (*release) (struct gendisk *, fmode_t);
57651- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57652- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57653- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57654- int (*direct_access) (struct block_device *, sector_t,
57655+ int (* const open) (struct block_device *, fmode_t);
57656+ int (* const release) (struct gendisk *, fmode_t);
57657+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57658+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57659+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57660+ int (* const direct_access) (struct block_device *, sector_t,
57661 void **, unsigned long *);
57662- int (*media_changed) (struct gendisk *);
57663- unsigned long long (*set_capacity) (struct gendisk *,
57664+ int (* const media_changed) (struct gendisk *);
57665+ unsigned long long (* const set_capacity) (struct gendisk *,
57666 unsigned long long);
57667- int (*revalidate_disk) (struct gendisk *);
57668- int (*getgeo)(struct block_device *, struct hd_geometry *);
57669- struct module *owner;
57670+ int (* const revalidate_disk) (struct gendisk *);
57671+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
57672+ struct module * const owner;
57673 };
57674
57675 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57676diff -urNp linux-2.6.32.45/include/linux/blktrace_api.h linux-2.6.32.45/include/linux/blktrace_api.h
57677--- linux-2.6.32.45/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57678+++ linux-2.6.32.45/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57679@@ -160,7 +160,7 @@ struct blk_trace {
57680 struct dentry *dir;
57681 struct dentry *dropped_file;
57682 struct dentry *msg_file;
57683- atomic_t dropped;
57684+ atomic_unchecked_t dropped;
57685 };
57686
57687 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57688diff -urNp linux-2.6.32.45/include/linux/byteorder/little_endian.h linux-2.6.32.45/include/linux/byteorder/little_endian.h
57689--- linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57690+++ linux-2.6.32.45/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57691@@ -42,51 +42,51 @@
57692
57693 static inline __le64 __cpu_to_le64p(const __u64 *p)
57694 {
57695- return (__force __le64)*p;
57696+ return (__force const __le64)*p;
57697 }
57698 static inline __u64 __le64_to_cpup(const __le64 *p)
57699 {
57700- return (__force __u64)*p;
57701+ return (__force const __u64)*p;
57702 }
57703 static inline __le32 __cpu_to_le32p(const __u32 *p)
57704 {
57705- return (__force __le32)*p;
57706+ return (__force const __le32)*p;
57707 }
57708 static inline __u32 __le32_to_cpup(const __le32 *p)
57709 {
57710- return (__force __u32)*p;
57711+ return (__force const __u32)*p;
57712 }
57713 static inline __le16 __cpu_to_le16p(const __u16 *p)
57714 {
57715- return (__force __le16)*p;
57716+ return (__force const __le16)*p;
57717 }
57718 static inline __u16 __le16_to_cpup(const __le16 *p)
57719 {
57720- return (__force __u16)*p;
57721+ return (__force const __u16)*p;
57722 }
57723 static inline __be64 __cpu_to_be64p(const __u64 *p)
57724 {
57725- return (__force __be64)__swab64p(p);
57726+ return (__force const __be64)__swab64p(p);
57727 }
57728 static inline __u64 __be64_to_cpup(const __be64 *p)
57729 {
57730- return __swab64p((__u64 *)p);
57731+ return __swab64p((const __u64 *)p);
57732 }
57733 static inline __be32 __cpu_to_be32p(const __u32 *p)
57734 {
57735- return (__force __be32)__swab32p(p);
57736+ return (__force const __be32)__swab32p(p);
57737 }
57738 static inline __u32 __be32_to_cpup(const __be32 *p)
57739 {
57740- return __swab32p((__u32 *)p);
57741+ return __swab32p((const __u32 *)p);
57742 }
57743 static inline __be16 __cpu_to_be16p(const __u16 *p)
57744 {
57745- return (__force __be16)__swab16p(p);
57746+ return (__force const __be16)__swab16p(p);
57747 }
57748 static inline __u16 __be16_to_cpup(const __be16 *p)
57749 {
57750- return __swab16p((__u16 *)p);
57751+ return __swab16p((const __u16 *)p);
57752 }
57753 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57754 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57755diff -urNp linux-2.6.32.45/include/linux/cache.h linux-2.6.32.45/include/linux/cache.h
57756--- linux-2.6.32.45/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
57757+++ linux-2.6.32.45/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
57758@@ -16,6 +16,10 @@
57759 #define __read_mostly
57760 #endif
57761
57762+#ifndef __read_only
57763+#define __read_only __read_mostly
57764+#endif
57765+
57766 #ifndef ____cacheline_aligned
57767 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57768 #endif
57769diff -urNp linux-2.6.32.45/include/linux/capability.h linux-2.6.32.45/include/linux/capability.h
57770--- linux-2.6.32.45/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
57771+++ linux-2.6.32.45/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
57772@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
57773 (security_real_capable_noaudit((t), (cap)) == 0)
57774
57775 extern int capable(int cap);
57776+int capable_nolog(int cap);
57777
57778 /* audit system wants to get cap info from files as well */
57779 struct dentry;
57780diff -urNp linux-2.6.32.45/include/linux/compiler-gcc4.h linux-2.6.32.45/include/linux/compiler-gcc4.h
57781--- linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
57782+++ linux-2.6.32.45/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
57783@@ -36,4 +36,13 @@
57784 the kernel context */
57785 #define __cold __attribute__((__cold__))
57786
57787+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57788+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57789+#define __bos0(ptr) __bos((ptr), 0)
57790+#define __bos1(ptr) __bos((ptr), 1)
57791+
57792+#if __GNUC_MINOR__ >= 5
57793+#define __no_const __attribute__((no_const))
57794+#endif
57795+
57796 #endif
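The __bos*() wrappers expose gcc's __builtin_object_size(), which returns the known size of the object behind a pointer, or (size_t)-1 when it cannot be determined. A hedged sketch of the kind of bounds check this enables; copy_from_user_checked() is a hypothetical helper, not the patch's own fortify code.

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static inline unsigned long
    copy_from_user_checked(void *to, const void __user *from, unsigned long n)
    {
            /* __bos0() yields the full object size when gcc can prove it;
             * (size_t)-1 means "unknown", so the check is skipped then. */
            if (__bos0(to) != (size_t)-1 && n > __bos0(to))
                    BUG();
            return copy_from_user(to, from, n);
    }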
57797diff -urNp linux-2.6.32.45/include/linux/compiler.h linux-2.6.32.45/include/linux/compiler.h
57798--- linux-2.6.32.45/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
57799+++ linux-2.6.32.45/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
57800@@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
57801 # define __attribute_const__ /* unimplemented */
57802 #endif
57803
57804+#ifndef __no_const
57805+# define __no_const
57806+#endif
57807+
57808 /*
57809 * Tell gcc if a function is cold. The compiler will assume any path
57810 * directly leading to the call is unlikely.
57811@@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
57812 #define __cold
57813 #endif
57814
57815+#ifndef __alloc_size
57816+#define __alloc_size(...)
57817+#endif
57818+
57819+#ifndef __bos
57820+#define __bos(ptr, arg)
57821+#endif
57822+
57823+#ifndef __bos0
57824+#define __bos0(ptr)
57825+#endif
57826+
57827+#ifndef __bos1
57828+#define __bos1(ptr)
57829+#endif
57830+
57831 /* Simple shorthand for a section definition */
57832 #ifndef __section
57833 # define __section(S) __attribute__ ((__section__(#S)))
57834@@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
57835 * use is to mediate communication between process-level code and irq/NMI
57836 * handlers, all running on the same CPU.
57837 */
57838-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57839+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57840+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57841
57842 #endif /* __LINUX_COMPILER_H */
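With the split above, plain ACCESS_ONCE() now produces a const-qualified access, so stray writes through it fail to compile and writers must opt in with ACCESS_ONCE_RW(). A small illustrative use; example_wait_and_clear() is a hypothetical function.

    #include <linux/compiler.h>
    #include <asm/processor.h>

    static void example_wait_and_clear(int *flag)
    {
            while (!ACCESS_ONCE(*flag))     /* volatile read-only snapshot */
                    cpu_relax();
            ACCESS_ONCE_RW(*flag) = 0;      /* explicit volatile write     */
    }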
57843diff -urNp linux-2.6.32.45/include/linux/crypto.h linux-2.6.32.45/include/linux/crypto.h
57844--- linux-2.6.32.45/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
57845+++ linux-2.6.32.45/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
57846@@ -394,7 +394,7 @@ struct cipher_tfm {
57847 const u8 *key, unsigned int keylen);
57848 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57849 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
57850-};
57851+} __no_const;
57852
57853 struct hash_tfm {
57854 int (*init)(struct hash_desc *desc);
57855@@ -415,13 +415,13 @@ struct compress_tfm {
57856 int (*cot_decompress)(struct crypto_tfm *tfm,
57857 const u8 *src, unsigned int slen,
57858 u8 *dst, unsigned int *dlen);
57859-};
57860+} __no_const;
57861
57862 struct rng_tfm {
57863 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
57864 unsigned int dlen);
57865 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
57866-};
57867+} __no_const;
57868
57869 #define crt_ablkcipher crt_u.ablkcipher
57870 #define crt_aead crt_u.aead
57871diff -urNp linux-2.6.32.45/include/linux/dcache.h linux-2.6.32.45/include/linux/dcache.h
57872--- linux-2.6.32.45/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
57873+++ linux-2.6.32.45/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
57874@@ -119,6 +119,8 @@ struct dentry {
57875 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
57876 };
57877
57878+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
57879+
57880 /*
57881 * dentry->d_lock spinlock nesting subclasses:
57882 *
57883diff -urNp linux-2.6.32.45/include/linux/decompress/mm.h linux-2.6.32.45/include/linux/decompress/mm.h
57884--- linux-2.6.32.45/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
57885+++ linux-2.6.32.45/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
57886@@ -78,7 +78,7 @@ static void free(void *where)
57887 * warnings when not needed (indeed large_malloc / large_free are not
57888 * needed by inflate */
57889
57890-#define malloc(a) kmalloc(a, GFP_KERNEL)
57891+#define malloc(a) kmalloc((a), GFP_KERNEL)
57892 #define free(a) kfree(a)
57893
57894 #define large_malloc(a) vmalloc(a)
57895diff -urNp linux-2.6.32.45/include/linux/dma-mapping.h linux-2.6.32.45/include/linux/dma-mapping.h
57896--- linux-2.6.32.45/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
57897+++ linux-2.6.32.45/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
57898@@ -16,50 +16,50 @@ enum dma_data_direction {
57899 };
57900
57901 struct dma_map_ops {
57902- void* (*alloc_coherent)(struct device *dev, size_t size,
57903+ void* (* const alloc_coherent)(struct device *dev, size_t size,
57904 dma_addr_t *dma_handle, gfp_t gfp);
57905- void (*free_coherent)(struct device *dev, size_t size,
57906+ void (* const free_coherent)(struct device *dev, size_t size,
57907 void *vaddr, dma_addr_t dma_handle);
57908- dma_addr_t (*map_page)(struct device *dev, struct page *page,
57909+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
57910 unsigned long offset, size_t size,
57911 enum dma_data_direction dir,
57912 struct dma_attrs *attrs);
57913- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
57914+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
57915 size_t size, enum dma_data_direction dir,
57916 struct dma_attrs *attrs);
57917- int (*map_sg)(struct device *dev, struct scatterlist *sg,
57918+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
57919 int nents, enum dma_data_direction dir,
57920 struct dma_attrs *attrs);
57921- void (*unmap_sg)(struct device *dev,
57922+ void (* const unmap_sg)(struct device *dev,
57923 struct scatterlist *sg, int nents,
57924 enum dma_data_direction dir,
57925 struct dma_attrs *attrs);
57926- void (*sync_single_for_cpu)(struct device *dev,
57927+ void (* const sync_single_for_cpu)(struct device *dev,
57928 dma_addr_t dma_handle, size_t size,
57929 enum dma_data_direction dir);
57930- void (*sync_single_for_device)(struct device *dev,
57931+ void (* const sync_single_for_device)(struct device *dev,
57932 dma_addr_t dma_handle, size_t size,
57933 enum dma_data_direction dir);
57934- void (*sync_single_range_for_cpu)(struct device *dev,
57935+ void (* const sync_single_range_for_cpu)(struct device *dev,
57936 dma_addr_t dma_handle,
57937 unsigned long offset,
57938 size_t size,
57939 enum dma_data_direction dir);
57940- void (*sync_single_range_for_device)(struct device *dev,
57941+ void (* const sync_single_range_for_device)(struct device *dev,
57942 dma_addr_t dma_handle,
57943 unsigned long offset,
57944 size_t size,
57945 enum dma_data_direction dir);
57946- void (*sync_sg_for_cpu)(struct device *dev,
57947+ void (* const sync_sg_for_cpu)(struct device *dev,
57948 struct scatterlist *sg, int nents,
57949 enum dma_data_direction dir);
57950- void (*sync_sg_for_device)(struct device *dev,
57951+ void (* const sync_sg_for_device)(struct device *dev,
57952 struct scatterlist *sg, int nents,
57953 enum dma_data_direction dir);
57954- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
57955- int (*dma_supported)(struct device *dev, u64 mask);
57956+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
57957+ int (* const dma_supported)(struct device *dev, u64 mask);
57958 int (*set_dma_mask)(struct device *dev, u64 mask);
57959- int is_phys;
57960+ const int is_phys;
57961 };
57962
57963 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
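
Const-qualifying every dma_map_ops member (and is_phys) means an ops table can only be populated by its initialiser, not patched after registration. For reference, the unchanged DMA_BIT_MASK() macro in the trailing context line evaluates as follows (standalone check, not part of the patch):

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

int main(void)
{
        printf("%016llx\n", DMA_BIT_MASK(32));  /* 00000000ffffffff */
        printf("%016llx\n", DMA_BIT_MASK(64));  /* ffffffffffffffff; 64 is special-cased because 1ULL << 64 would be undefined */
        return 0;
}
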
57964diff -urNp linux-2.6.32.45/include/linux/dst.h linux-2.6.32.45/include/linux/dst.h
57965--- linux-2.6.32.45/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
57966+++ linux-2.6.32.45/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
57967@@ -380,7 +380,7 @@ struct dst_node
57968 struct thread_pool *pool;
57969
57970 /* Transaction IDs live here */
57971- atomic_long_t gen;
57972+ atomic_long_unchecked_t gen;
57973
57974 /*
57975 * How frequently and how many times transaction
57976diff -urNp linux-2.6.32.45/include/linux/elf.h linux-2.6.32.45/include/linux/elf.h
57977--- linux-2.6.32.45/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
57978+++ linux-2.6.32.45/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
57979@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
57980 #define PT_GNU_EH_FRAME 0x6474e550
57981
57982 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
57983+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
57984+
57985+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
57986+
57987+/* Constants for the e_flags field */
57988+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57989+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
57990+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
57991+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
57992+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57993+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57994
57995 /* These constants define the different elf file types */
57996 #define ET_NONE 0
57997@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
57998 #define DT_DEBUG 21
57999 #define DT_TEXTREL 22
58000 #define DT_JMPREL 23
58001+#define DT_FLAGS 30
58002+ #define DF_TEXTREL 0x00000004
58003 #define DT_ENCODING 32
58004 #define OLD_DT_LOOS 0x60000000
58005 #define DT_LOOS 0x6000000d
58006@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
58007 #define PF_W 0x2
58008 #define PF_X 0x1
58009
58010+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58011+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58012+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58013+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58014+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58015+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58016+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58017+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58018+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58019+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58020+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58021+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58022+
58023 typedef struct elf32_phdr{
58024 Elf32_Word p_type;
58025 Elf32_Off p_offset;
58026@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
58027 #define EI_OSABI 7
58028 #define EI_PAD 8
58029
58030+#define EI_PAX 14
58031+
58032 #define ELFMAG0 0x7f /* EI_MAG */
58033 #define ELFMAG1 'E'
58034 #define ELFMAG2 'L'
58035@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
58036 #define elf_phdr elf32_phdr
58037 #define elf_note elf32_note
58038 #define elf_addr_t Elf32_Off
58039+#define elf_dyn Elf32_Dyn
58040
58041 #else
58042
58043@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
58044 #define elf_phdr elf64_phdr
58045 #define elf_note elf64_note
58046 #define elf_addr_t Elf64_Off
58047+#define elf_dyn Elf64_Dyn
58048
58049 #endif
58050
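
The PT_PAX_FLAGS program header carries per-binary PaX policy in p_flags, with separate enable and disable bits per feature so "not set" can be told apart from "forced off" (the EF_PAX_* values play a similar role for the older EI_PAX marking). A hypothetical userspace decoder for two of the features (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define PF_PAGEEXEC     (1U << 4)
#define PF_NOPAGEEXEC   (1U << 5)
#define PF_MPROTECT     (1U << 8)
#define PF_NOMPROTECT   (1U << 9)

static const char *tristate(uint32_t flags, uint32_t on, uint32_t off)
{
        if (flags & on)
                return "enabled";
        if (flags & off)
                return "disabled";
        return "default";
}

int main(void)
{
        uint32_t p_flags = PF_PAGEEXEC | PF_NOMPROTECT; /* example value */

        printf("PAGEEXEC: %s\n", tristate(p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC));
        printf("MPROTECT: %s\n", tristate(p_flags, PF_MPROTECT, PF_NOMPROTECT));
        return 0;
}
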
58051diff -urNp linux-2.6.32.45/include/linux/fscache-cache.h linux-2.6.32.45/include/linux/fscache-cache.h
58052--- linux-2.6.32.45/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
58053+++ linux-2.6.32.45/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
58054@@ -116,7 +116,7 @@ struct fscache_operation {
58055 #endif
58056 };
58057
58058-extern atomic_t fscache_op_debug_id;
58059+extern atomic_unchecked_t fscache_op_debug_id;
58060 extern const struct slow_work_ops fscache_op_slow_work_ops;
58061
58062 extern void fscache_enqueue_operation(struct fscache_operation *);
58063@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
58064 fscache_operation_release_t release)
58065 {
58066 atomic_set(&op->usage, 1);
58067- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58068+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58069 op->release = release;
58070 INIT_LIST_HEAD(&op->pend_link);
58071 fscache_set_op_state(op, "Init");
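
fscache_op_debug_id is a plain ID generator, so under the PaX REFCOUNT hardening (which traps overflows of ordinary atomic_t reference counts) it is retyped as atomic_unchecked_t and bumped with atomic_inc_return_unchecked(), keeping wrap-around semantics. A rough userspace model of the split, with types and names simplified (not part of the patch):

#include <stdio.h>

typedef struct { int counter; } atomic_t;               /* overflow would be trapped when REFCOUNT is enabled */
typedef struct { int counter; } atomic_unchecked_t;     /* wrapping is acceptable: IDs, statistics */

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
        return ++v->counter;    /* no saturation or trap */
}

static atomic_unchecked_t debug_id_src;

int main(void)
{
        printf("first id: %d\n", atomic_inc_return_unchecked(&debug_id_src));
        printf("next  id: %d\n", atomic_inc_return_unchecked(&debug_id_src));
        return 0;
}
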
58072diff -urNp linux-2.6.32.45/include/linux/fs.h linux-2.6.32.45/include/linux/fs.h
58073--- linux-2.6.32.45/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
58074+++ linux-2.6.32.45/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
58075@@ -90,6 +90,11 @@ struct inodes_stat_t {
58076 /* Expect random access pattern */
58077 #define FMODE_RANDOM ((__force fmode_t)4096)
58078
58079+/* Hack for grsec so as not to require read permission simply to execute
58080+ * a binary
58081+ */
58082+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
58083+
58084 /*
58085 * The below are the various read and write types that we support. Some of
58086 * them include behavioral modifiers that send information down to the
58087@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
58088 unsigned long, unsigned long);
58089
58090 struct address_space_operations {
58091- int (*writepage)(struct page *page, struct writeback_control *wbc);
58092- int (*readpage)(struct file *, struct page *);
58093- void (*sync_page)(struct page *);
58094+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
58095+ int (* const readpage)(struct file *, struct page *);
58096+ void (* const sync_page)(struct page *);
58097
58098 /* Write back some dirty pages from this mapping. */
58099- int (*writepages)(struct address_space *, struct writeback_control *);
58100+ int (* const writepages)(struct address_space *, struct writeback_control *);
58101
58102 /* Set a page dirty. Return true if this dirtied it */
58103- int (*set_page_dirty)(struct page *page);
58104+ int (* const set_page_dirty)(struct page *page);
58105
58106- int (*readpages)(struct file *filp, struct address_space *mapping,
58107+ int (* const readpages)(struct file *filp, struct address_space *mapping,
58108 struct list_head *pages, unsigned nr_pages);
58109
58110- int (*write_begin)(struct file *, struct address_space *mapping,
58111+ int (* const write_begin)(struct file *, struct address_space *mapping,
58112 loff_t pos, unsigned len, unsigned flags,
58113 struct page **pagep, void **fsdata);
58114- int (*write_end)(struct file *, struct address_space *mapping,
58115+ int (* const write_end)(struct file *, struct address_space *mapping,
58116 loff_t pos, unsigned len, unsigned copied,
58117 struct page *page, void *fsdata);
58118
58119 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
58120- sector_t (*bmap)(struct address_space *, sector_t);
58121- void (*invalidatepage) (struct page *, unsigned long);
58122- int (*releasepage) (struct page *, gfp_t);
58123- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
58124+ sector_t (* const bmap)(struct address_space *, sector_t);
58125+ void (* const invalidatepage) (struct page *, unsigned long);
58126+ int (* const releasepage) (struct page *, gfp_t);
58127+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
58128 loff_t offset, unsigned long nr_segs);
58129- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
58130+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
58131 void **, unsigned long *);
58132 /* migrate the contents of a page to the specified target */
58133- int (*migratepage) (struct address_space *,
58134+ int (* const migratepage) (struct address_space *,
58135 struct page *, struct page *);
58136- int (*launder_page) (struct page *);
58137- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
58138+ int (* const launder_page) (struct page *);
58139+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
58140 unsigned long);
58141- int (*error_remove_page)(struct address_space *, struct page *);
58142+ int (* const error_remove_page)(struct address_space *, struct page *);
58143 };
58144
58145 /*
58146@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
58147 typedef struct files_struct *fl_owner_t;
58148
58149 struct file_lock_operations {
58150- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58151- void (*fl_release_private)(struct file_lock *);
58152+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58153+ void (* const fl_release_private)(struct file_lock *);
58154 };
58155
58156 struct lock_manager_operations {
58157- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
58158- void (*fl_notify)(struct file_lock *); /* unblock callback */
58159- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
58160- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58161- void (*fl_release_private)(struct file_lock *);
58162- void (*fl_break)(struct file_lock *);
58163- int (*fl_mylease)(struct file_lock *, struct file_lock *);
58164- int (*fl_change)(struct file_lock **, int);
58165+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
58166+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
58167+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
58168+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58169+ void (* const fl_release_private)(struct file_lock *);
58170+ void (* const fl_break)(struct file_lock *);
58171+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
58172+ int (* const fl_change)(struct file_lock **, int);
58173 };
58174
58175 struct lock_manager {
58176@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
58177 unsigned int fi_flags; /* Flags as passed from user */
58178 unsigned int fi_extents_mapped; /* Number of mapped extents */
58179 unsigned int fi_extents_max; /* Size of fiemap_extent array */
58180- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
58181+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
58182 * array */
58183 };
58184 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
58185@@ -1486,7 +1491,7 @@ struct block_device_operations;
58186 * can be called without the big kernel lock held in all filesystems.
58187 */
58188 struct file_operations {
58189- struct module *owner;
58190+ struct module * const owner;
58191 loff_t (*llseek) (struct file *, loff_t, int);
58192 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58193 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
58194@@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
58195 unsigned long, loff_t *);
58196
58197 struct super_operations {
58198- struct inode *(*alloc_inode)(struct super_block *sb);
58199- void (*destroy_inode)(struct inode *);
58200+ struct inode *(* const alloc_inode)(struct super_block *sb);
58201+ void (* const destroy_inode)(struct inode *);
58202
58203- void (*dirty_inode) (struct inode *);
58204- int (*write_inode) (struct inode *, int);
58205- void (*drop_inode) (struct inode *);
58206- void (*delete_inode) (struct inode *);
58207- void (*put_super) (struct super_block *);
58208- void (*write_super) (struct super_block *);
58209- int (*sync_fs)(struct super_block *sb, int wait);
58210- int (*freeze_fs) (struct super_block *);
58211- int (*unfreeze_fs) (struct super_block *);
58212- int (*statfs) (struct dentry *, struct kstatfs *);
58213- int (*remount_fs) (struct super_block *, int *, char *);
58214- void (*clear_inode) (struct inode *);
58215- void (*umount_begin) (struct super_block *);
58216+ void (* const dirty_inode) (struct inode *);
58217+ int (* const write_inode) (struct inode *, int);
58218+ void (* const drop_inode) (struct inode *);
58219+ void (* const delete_inode) (struct inode *);
58220+ void (* const put_super) (struct super_block *);
58221+ void (* const write_super) (struct super_block *);
58222+ int (* const sync_fs)(struct super_block *sb, int wait);
58223+ int (* const freeze_fs) (struct super_block *);
58224+ int (* const unfreeze_fs) (struct super_block *);
58225+ int (* const statfs) (struct dentry *, struct kstatfs *);
58226+ int (* const remount_fs) (struct super_block *, int *, char *);
58227+ void (* const clear_inode) (struct inode *);
58228+ void (* const umount_begin) (struct super_block *);
58229
58230- int (*show_options)(struct seq_file *, struct vfsmount *);
58231- int (*show_stats)(struct seq_file *, struct vfsmount *);
58232+ int (* const show_options)(struct seq_file *, struct vfsmount *);
58233+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
58234 #ifdef CONFIG_QUOTA
58235- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58236- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58237+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58238+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58239 #endif
58240- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58241+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58242 };
58243
58244 /*
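
With every member of address_space_operations, file_lock_operations, lock_manager_operations and super_operations const-qualified, an ops table has to be filled in by a designated initialiser at definition time; "sops->foo = hook" style runtime patching no longer compiles. Hypothetical userspace analogue (not part of the patch):

struct demo_super_ops {
        int (* const sync_fs)(int wait);
        void (* const put_super)(void);
};

static int demo_sync_fs(int wait)
{
        (void)wait;
        return 0;
}

static void demo_put_super(void)
{
}

static struct demo_super_ops demo_sops = {
        .sync_fs   = demo_sync_fs,
        .put_super = demo_put_super,
};

int main(void)
{
        /* demo_sops.sync_fs = demo_sync_fs;    would not compile: the member is const */
        return demo_sops.sync_fs(1);
}
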
58245diff -urNp linux-2.6.32.45/include/linux/fs_struct.h linux-2.6.32.45/include/linux/fs_struct.h
58246--- linux-2.6.32.45/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58247+++ linux-2.6.32.45/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58248@@ -4,7 +4,7 @@
58249 #include <linux/path.h>
58250
58251 struct fs_struct {
58252- int users;
58253+ atomic_t users;
58254 rwlock_t lock;
58255 int umask;
58256 int in_exec;
58257diff -urNp linux-2.6.32.45/include/linux/ftrace_event.h linux-2.6.32.45/include/linux/ftrace_event.h
58258--- linux-2.6.32.45/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58259+++ linux-2.6.32.45/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58260@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58261 int filter_type);
58262 extern int trace_define_common_fields(struct ftrace_event_call *call);
58263
58264-#define is_signed_type(type) (((type)(-1)) < 0)
58265+#define is_signed_type(type) (((type)(-1)) < (type)1)
58266
58267 int trace_set_clr_event(const char *system, const char *event, int set);
58268
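
Both the old and new is_signed_type() classify a type by whether (type)(-1) compares below a small positive value; comparing against (type)1 instead of the literal 0 gives the same answer for every integer type while, presumably, avoiding gcc's "comparison of unsigned expression < 0 is always false" warning. Quick standalone check (not part of the patch):

#include <stdio.h>

#define is_signed_type(type)    (((type)(-1)) < (type)1)

int main(void)
{
        printf("int:           %d\n", is_signed_type(int));             /* 1 */
        printf("unsigned int:  %d\n", is_signed_type(unsigned int));    /* 0 */
        printf("char:          %d\n", is_signed_type(char));            /* depends on the ABI's char signedness */
        return 0;
}
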
58269diff -urNp linux-2.6.32.45/include/linux/genhd.h linux-2.6.32.45/include/linux/genhd.h
58270--- linux-2.6.32.45/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58271+++ linux-2.6.32.45/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58272@@ -161,7 +161,7 @@ struct gendisk {
58273
58274 struct timer_rand_state *random;
58275
58276- atomic_t sync_io; /* RAID */
58277+ atomic_unchecked_t sync_io; /* RAID */
58278 struct work_struct async_notify;
58279 #ifdef CONFIG_BLK_DEV_INTEGRITY
58280 struct blk_integrity *integrity;
58281diff -urNp linux-2.6.32.45/include/linux/gracl.h linux-2.6.32.45/include/linux/gracl.h
58282--- linux-2.6.32.45/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58283+++ linux-2.6.32.45/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58284@@ -0,0 +1,317 @@
58285+#ifndef GR_ACL_H
58286+#define GR_ACL_H
58287+
58288+#include <linux/grdefs.h>
58289+#include <linux/resource.h>
58290+#include <linux/capability.h>
58291+#include <linux/dcache.h>
58292+#include <asm/resource.h>
58293+
58294+/* Major status information */
58295+
58296+#define GR_VERSION "grsecurity 2.2.2"
58297+#define GRSECURITY_VERSION 0x2202
58298+
58299+enum {
58300+ GR_SHUTDOWN = 0,
58301+ GR_ENABLE = 1,
58302+ GR_SPROLE = 2,
58303+ GR_RELOAD = 3,
58304+ GR_SEGVMOD = 4,
58305+ GR_STATUS = 5,
58306+ GR_UNSPROLE = 6,
58307+ GR_PASSSET = 7,
58308+ GR_SPROLEPAM = 8,
58309+};
58310+
58311+/* Password setup definitions
58312+ * kernel/grhash.c */
58313+enum {
58314+ GR_PW_LEN = 128,
58315+ GR_SALT_LEN = 16,
58316+ GR_SHA_LEN = 32,
58317+};
58318+
58319+enum {
58320+ GR_SPROLE_LEN = 64,
58321+};
58322+
58323+enum {
58324+ GR_NO_GLOB = 0,
58325+ GR_REG_GLOB,
58326+ GR_CREATE_GLOB
58327+};
58328+
58329+#define GR_NLIMITS 32
58330+
58331+/* Begin Data Structures */
58332+
58333+struct sprole_pw {
58334+ unsigned char *rolename;
58335+ unsigned char salt[GR_SALT_LEN];
58336+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58337+};
58338+
58339+struct name_entry {
58340+ __u32 key;
58341+ ino_t inode;
58342+ dev_t device;
58343+ char *name;
58344+ __u16 len;
58345+ __u8 deleted;
58346+ struct name_entry *prev;
58347+ struct name_entry *next;
58348+};
58349+
58350+struct inodev_entry {
58351+ struct name_entry *nentry;
58352+ struct inodev_entry *prev;
58353+ struct inodev_entry *next;
58354+};
58355+
58356+struct acl_role_db {
58357+ struct acl_role_label **r_hash;
58358+ __u32 r_size;
58359+};
58360+
58361+struct inodev_db {
58362+ struct inodev_entry **i_hash;
58363+ __u32 i_size;
58364+};
58365+
58366+struct name_db {
58367+ struct name_entry **n_hash;
58368+ __u32 n_size;
58369+};
58370+
58371+struct crash_uid {
58372+ uid_t uid;
58373+ unsigned long expires;
58374+};
58375+
58376+struct gr_hash_struct {
58377+ void **table;
58378+ void **nametable;
58379+ void *first;
58380+ __u32 table_size;
58381+ __u32 used_size;
58382+ int type;
58383+};
58384+
58385+/* Userspace Grsecurity ACL data structures */
58386+
58387+struct acl_subject_label {
58388+ char *filename;
58389+ ino_t inode;
58390+ dev_t device;
58391+ __u32 mode;
58392+ kernel_cap_t cap_mask;
58393+ kernel_cap_t cap_lower;
58394+ kernel_cap_t cap_invert_audit;
58395+
58396+ struct rlimit res[GR_NLIMITS];
58397+ __u32 resmask;
58398+
58399+ __u8 user_trans_type;
58400+ __u8 group_trans_type;
58401+ uid_t *user_transitions;
58402+ gid_t *group_transitions;
58403+ __u16 user_trans_num;
58404+ __u16 group_trans_num;
58405+
58406+ __u32 sock_families[2];
58407+ __u32 ip_proto[8];
58408+ __u32 ip_type;
58409+ struct acl_ip_label **ips;
58410+ __u32 ip_num;
58411+ __u32 inaddr_any_override;
58412+
58413+ __u32 crashes;
58414+ unsigned long expires;
58415+
58416+ struct acl_subject_label *parent_subject;
58417+ struct gr_hash_struct *hash;
58418+ struct acl_subject_label *prev;
58419+ struct acl_subject_label *next;
58420+
58421+ struct acl_object_label **obj_hash;
58422+ __u32 obj_hash_size;
58423+ __u16 pax_flags;
58424+};
58425+
58426+struct role_allowed_ip {
58427+ __u32 addr;
58428+ __u32 netmask;
58429+
58430+ struct role_allowed_ip *prev;
58431+ struct role_allowed_ip *next;
58432+};
58433+
58434+struct role_transition {
58435+ char *rolename;
58436+
58437+ struct role_transition *prev;
58438+ struct role_transition *next;
58439+};
58440+
58441+struct acl_role_label {
58442+ char *rolename;
58443+ uid_t uidgid;
58444+ __u16 roletype;
58445+
58446+ __u16 auth_attempts;
58447+ unsigned long expires;
58448+
58449+ struct acl_subject_label *root_label;
58450+ struct gr_hash_struct *hash;
58451+
58452+ struct acl_role_label *prev;
58453+ struct acl_role_label *next;
58454+
58455+ struct role_transition *transitions;
58456+ struct role_allowed_ip *allowed_ips;
58457+ uid_t *domain_children;
58458+ __u16 domain_child_num;
58459+
58460+ struct acl_subject_label **subj_hash;
58461+ __u32 subj_hash_size;
58462+};
58463+
58464+struct user_acl_role_db {
58465+ struct acl_role_label **r_table;
58466+ __u32 num_pointers; /* Number of allocations to track */
58467+ __u32 num_roles; /* Number of roles */
58468+ __u32 num_domain_children; /* Number of domain children */
58469+ __u32 num_subjects; /* Number of subjects */
58470+ __u32 num_objects; /* Number of objects */
58471+};
58472+
58473+struct acl_object_label {
58474+ char *filename;
58475+ ino_t inode;
58476+ dev_t device;
58477+ __u32 mode;
58478+
58479+ struct acl_subject_label *nested;
58480+ struct acl_object_label *globbed;
58481+
58482+ /* next two structures not used */
58483+
58484+ struct acl_object_label *prev;
58485+ struct acl_object_label *next;
58486+};
58487+
58488+struct acl_ip_label {
58489+ char *iface;
58490+ __u32 addr;
58491+ __u32 netmask;
58492+ __u16 low, high;
58493+ __u8 mode;
58494+ __u32 type;
58495+ __u32 proto[8];
58496+
58497+ /* next two structures not used */
58498+
58499+ struct acl_ip_label *prev;
58500+ struct acl_ip_label *next;
58501+};
58502+
58503+struct gr_arg {
58504+ struct user_acl_role_db role_db;
58505+ unsigned char pw[GR_PW_LEN];
58506+ unsigned char salt[GR_SALT_LEN];
58507+ unsigned char sum[GR_SHA_LEN];
58508+ unsigned char sp_role[GR_SPROLE_LEN];
58509+ struct sprole_pw *sprole_pws;
58510+ dev_t segv_device;
58511+ ino_t segv_inode;
58512+ uid_t segv_uid;
58513+ __u16 num_sprole_pws;
58514+ __u16 mode;
58515+};
58516+
58517+struct gr_arg_wrapper {
58518+ struct gr_arg *arg;
58519+ __u32 version;
58520+ __u32 size;
58521+};
58522+
58523+struct subject_map {
58524+ struct acl_subject_label *user;
58525+ struct acl_subject_label *kernel;
58526+ struct subject_map *prev;
58527+ struct subject_map *next;
58528+};
58529+
58530+struct acl_subj_map_db {
58531+ struct subject_map **s_hash;
58532+ __u32 s_size;
58533+};
58534+
58535+/* End Data Structures Section */
58536+
58537+/* Hash functions generated by empirical testing by Brad Spengler.
58538+   Makes good use of the low bits of the inode.  Generally 0-1 times
58539+   in loop for a successful match, 0-3 for an unsuccessful match.
58540+   Shift/add algorithm with modulus of table size and an XOR. */
58541+
58542+static __inline__ unsigned int
58543+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58544+{
58545+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58546+}
58547+
58548+static __inline__ unsigned int
58549+shash(const struct acl_subject_label *userp, const unsigned int sz)
58550+{
58551+ return ((const unsigned long)userp % sz);
58552+}
58553+
58554+static __inline__ unsigned int
58555+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58556+{
58557+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58558+}
58559+
58560+static __inline__ unsigned int
58561+nhash(const char *name, const __u16 len, const unsigned int sz)
58562+{
58563+ return full_name_hash((const unsigned char *)name, len) % sz;
58564+}
58565+
58566+#define FOR_EACH_ROLE_START(role) \
58567+ role = role_list; \
58568+ while (role) {
58569+
58570+#define FOR_EACH_ROLE_END(role) \
58571+ role = role->prev; \
58572+ }
58573+
58574+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58575+ subj = NULL; \
58576+ iter = 0; \
58577+ while (iter < role->subj_hash_size) { \
58578+ if (subj == NULL) \
58579+ subj = role->subj_hash[iter]; \
58580+ if (subj == NULL) { \
58581+ iter++; \
58582+ continue; \
58583+ }
58584+
58585+#define FOR_EACH_SUBJECT_END(subj,iter) \
58586+ subj = subj->next; \
58587+ if (subj == NULL) \
58588+ iter++; \
58589+ }
58590+
58591+
58592+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58593+ subj = role->hash->first; \
58594+ while (subj != NULL) {
58595+
58596+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58597+ subj = subj->next; \
58598+ }
58599+
58600+#endif
58601+
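
The inline helpers at the end of gracl.h are simple shift/add/xor-then-modulus hashes keyed on uid, inode/device or name, feeding the role, object and name hash tables declared above; the FOR_EACH_* macros then open-code iteration over those chained tables. A small userspace harness for the inode/device hash (not part of the patch; types simplified, table size hypothetical):

#include <stdio.h>

static unsigned int fhash(unsigned long ino, unsigned int dev, unsigned int sz)
{
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        unsigned int sz = 256;  /* hypothetical obj_hash_size */
        unsigned long ino;

        for (ino = 1000; ino < 1005; ino++)
                printf("inode %lu dev 8 -> bucket %u\n", ino, fhash(ino, 8, sz));
        return 0;
}
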
58602diff -urNp linux-2.6.32.45/include/linux/gralloc.h linux-2.6.32.45/include/linux/gralloc.h
58603--- linux-2.6.32.45/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58604+++ linux-2.6.32.45/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58605@@ -0,0 +1,9 @@
58606+#ifndef __GRALLOC_H
58607+#define __GRALLOC_H
58608+
58609+void acl_free_all(void);
58610+int acl_alloc_stack_init(unsigned long size);
58611+void *acl_alloc(unsigned long len);
58612+void *acl_alloc_num(unsigned long num, unsigned long len);
58613+
58614+#endif
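
gralloc.h declares a region-style allocator used while loading the policy: size the allocation stack once, carve individual objects out of it with acl_alloc()/acl_alloc_num(), then tear everything down with a single acl_free_all(). A hypothetical userspace stand-in that mimics that call pattern (not part of the patch; the real implementations are supplied elsewhere in it):

#include <stdlib.h>

static void **alloc_stack;
static unsigned long alloc_top, alloc_max;

int acl_alloc_stack_init(unsigned long size)
{
        alloc_stack = calloc(size, sizeof(void *));
        alloc_max = alloc_stack ? size : 0;
        return alloc_stack != NULL;
}

void *acl_alloc(unsigned long len)
{
        void *p;

        if (alloc_top >= alloc_max || (p = calloc(1, len)) == NULL)
                return NULL;
        alloc_stack[alloc_top++] = p;   /* remembered for bulk release */
        return p;
}

void acl_free_all(void)
{
        while (alloc_top)
                free(alloc_stack[--alloc_top]);
        free(alloc_stack);
        alloc_stack = NULL;
        alloc_max = 0;
}

int main(void)
{
        if (!acl_alloc_stack_init(16))
                return 1;
        (void)acl_alloc(64);    /* e.g. one label-sized chunk */
        acl_free_all();
        return 0;
}
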
58615diff -urNp linux-2.6.32.45/include/linux/grdefs.h linux-2.6.32.45/include/linux/grdefs.h
58616--- linux-2.6.32.45/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58617+++ linux-2.6.32.45/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58618@@ -0,0 +1,140 @@
58619+#ifndef GRDEFS_H
58620+#define GRDEFS_H
58621+
58622+/* Begin grsecurity status declarations */
58623+
58624+enum {
58625+ GR_READY = 0x01,
58626+ GR_STATUS_INIT = 0x00 // disabled state
58627+};
58628+
58629+/* Begin ACL declarations */
58630+
58631+/* Role flags */
58632+
58633+enum {
58634+ GR_ROLE_USER = 0x0001,
58635+ GR_ROLE_GROUP = 0x0002,
58636+ GR_ROLE_DEFAULT = 0x0004,
58637+ GR_ROLE_SPECIAL = 0x0008,
58638+ GR_ROLE_AUTH = 0x0010,
58639+ GR_ROLE_NOPW = 0x0020,
58640+ GR_ROLE_GOD = 0x0040,
58641+ GR_ROLE_LEARN = 0x0080,
58642+ GR_ROLE_TPE = 0x0100,
58643+ GR_ROLE_DOMAIN = 0x0200,
58644+ GR_ROLE_PAM = 0x0400,
58645+ GR_ROLE_PERSIST = 0x800
58646+};
58647+
58648+/* ACL Subject and Object mode flags */
58649+enum {
58650+ GR_DELETED = 0x80000000
58651+};
58652+
58653+/* ACL Object-only mode flags */
58654+enum {
58655+ GR_READ = 0x00000001,
58656+ GR_APPEND = 0x00000002,
58657+ GR_WRITE = 0x00000004,
58658+ GR_EXEC = 0x00000008,
58659+ GR_FIND = 0x00000010,
58660+ GR_INHERIT = 0x00000020,
58661+ GR_SETID = 0x00000040,
58662+ GR_CREATE = 0x00000080,
58663+ GR_DELETE = 0x00000100,
58664+ GR_LINK = 0x00000200,
58665+ GR_AUDIT_READ = 0x00000400,
58666+ GR_AUDIT_APPEND = 0x00000800,
58667+ GR_AUDIT_WRITE = 0x00001000,
58668+ GR_AUDIT_EXEC = 0x00002000,
58669+ GR_AUDIT_FIND = 0x00004000,
58670+ GR_AUDIT_INHERIT= 0x00008000,
58671+ GR_AUDIT_SETID = 0x00010000,
58672+ GR_AUDIT_CREATE = 0x00020000,
58673+ GR_AUDIT_DELETE = 0x00040000,
58674+ GR_AUDIT_LINK = 0x00080000,
58675+ GR_PTRACERD = 0x00100000,
58676+ GR_NOPTRACE = 0x00200000,
58677+ GR_SUPPRESS = 0x00400000,
58678+ GR_NOLEARN = 0x00800000,
58679+ GR_INIT_TRANSFER= 0x01000000
58680+};
58681+
58682+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58683+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58684+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58685+
58686+/* ACL subject-only mode flags */
58687+enum {
58688+ GR_KILL = 0x00000001,
58689+ GR_VIEW = 0x00000002,
58690+ GR_PROTECTED = 0x00000004,
58691+ GR_LEARN = 0x00000008,
58692+ GR_OVERRIDE = 0x00000010,
58693+ /* just a placeholder, this mode is only used in userspace */
58694+ GR_DUMMY = 0x00000020,
58695+ GR_PROTSHM = 0x00000040,
58696+ GR_KILLPROC = 0x00000080,
58697+ GR_KILLIPPROC = 0x00000100,
58698+ /* just a placeholder, this mode is only used in userspace */
58699+ GR_NOTROJAN = 0x00000200,
58700+ GR_PROTPROCFD = 0x00000400,
58701+ GR_PROCACCT = 0x00000800,
58702+ GR_RELAXPTRACE = 0x00001000,
58703+ GR_NESTED = 0x00002000,
58704+ GR_INHERITLEARN = 0x00004000,
58705+ GR_PROCFIND = 0x00008000,
58706+ GR_POVERRIDE = 0x00010000,
58707+ GR_KERNELAUTH = 0x00020000,
58708+ GR_ATSECURE = 0x00040000,
58709+ GR_SHMEXEC = 0x00080000
58710+};
58711+
58712+enum {
58713+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58714+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58715+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58716+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58717+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58718+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58719+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58720+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58721+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58722+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58723+};
58724+
58725+enum {
58726+ GR_ID_USER = 0x01,
58727+ GR_ID_GROUP = 0x02,
58728+};
58729+
58730+enum {
58731+ GR_ID_ALLOW = 0x01,
58732+ GR_ID_DENY = 0x02,
58733+};
58734+
58735+#define GR_CRASH_RES 31
58736+#define GR_UIDTABLE_MAX 500
58737+
58738+/* begin resource learning section */
58739+enum {
58740+ GR_RLIM_CPU_BUMP = 60,
58741+ GR_RLIM_FSIZE_BUMP = 50000,
58742+ GR_RLIM_DATA_BUMP = 10000,
58743+ GR_RLIM_STACK_BUMP = 1000,
58744+ GR_RLIM_CORE_BUMP = 10000,
58745+ GR_RLIM_RSS_BUMP = 500000,
58746+ GR_RLIM_NPROC_BUMP = 1,
58747+ GR_RLIM_NOFILE_BUMP = 5,
58748+ GR_RLIM_MEMLOCK_BUMP = 50000,
58749+ GR_RLIM_AS_BUMP = 500000,
58750+ GR_RLIM_LOCKS_BUMP = 2,
58751+ GR_RLIM_SIGPENDING_BUMP = 5,
58752+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58753+ GR_RLIM_NICE_BUMP = 1,
58754+ GR_RLIM_RTPRIO_BUMP = 1,
58755+ GR_RLIM_RTTIME_BUMP = 1000000
58756+};
58757+
58758+#endif
58759diff -urNp linux-2.6.32.45/include/linux/grinternal.h linux-2.6.32.45/include/linux/grinternal.h
58760--- linux-2.6.32.45/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
58761+++ linux-2.6.32.45/include/linux/grinternal.h 2011-08-11 19:58:37.000000000 -0400
58762@@ -0,0 +1,217 @@
58763+#ifndef __GRINTERNAL_H
58764+#define __GRINTERNAL_H
58765+
58766+#ifdef CONFIG_GRKERNSEC
58767+
58768+#include <linux/fs.h>
58769+#include <linux/mnt_namespace.h>
58770+#include <linux/nsproxy.h>
58771+#include <linux/gracl.h>
58772+#include <linux/grdefs.h>
58773+#include <linux/grmsg.h>
58774+
58775+void gr_add_learn_entry(const char *fmt, ...)
58776+ __attribute__ ((format (printf, 1, 2)));
58777+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58778+ const struct vfsmount *mnt);
58779+__u32 gr_check_create(const struct dentry *new_dentry,
58780+ const struct dentry *parent,
58781+ const struct vfsmount *mnt, const __u32 mode);
58782+int gr_check_protected_task(const struct task_struct *task);
58783+__u32 to_gr_audit(const __u32 reqmode);
58784+int gr_set_acls(const int type);
58785+int gr_apply_subject_to_task(struct task_struct *task);
58786+int gr_acl_is_enabled(void);
58787+char gr_roletype_to_char(void);
58788+
58789+void gr_handle_alertkill(struct task_struct *task);
58790+char *gr_to_filename(const struct dentry *dentry,
58791+ const struct vfsmount *mnt);
58792+char *gr_to_filename1(const struct dentry *dentry,
58793+ const struct vfsmount *mnt);
58794+char *gr_to_filename2(const struct dentry *dentry,
58795+ const struct vfsmount *mnt);
58796+char *gr_to_filename3(const struct dentry *dentry,
58797+ const struct vfsmount *mnt);
58798+
58799+extern int grsec_enable_harden_ptrace;
58800+extern int grsec_enable_link;
58801+extern int grsec_enable_fifo;
58802+extern int grsec_enable_shm;
58803+extern int grsec_enable_execlog;
58804+extern int grsec_enable_signal;
58805+extern int grsec_enable_audit_ptrace;
58806+extern int grsec_enable_forkfail;
58807+extern int grsec_enable_time;
58808+extern int grsec_enable_rofs;
58809+extern int grsec_enable_chroot_shmat;
58810+extern int grsec_enable_chroot_mount;
58811+extern int grsec_enable_chroot_double;
58812+extern int grsec_enable_chroot_pivot;
58813+extern int grsec_enable_chroot_chdir;
58814+extern int grsec_enable_chroot_chmod;
58815+extern int grsec_enable_chroot_mknod;
58816+extern int grsec_enable_chroot_fchdir;
58817+extern int grsec_enable_chroot_nice;
58818+extern int grsec_enable_chroot_execlog;
58819+extern int grsec_enable_chroot_caps;
58820+extern int grsec_enable_chroot_sysctl;
58821+extern int grsec_enable_chroot_unix;
58822+extern int grsec_enable_tpe;
58823+extern int grsec_tpe_gid;
58824+extern int grsec_enable_tpe_all;
58825+extern int grsec_enable_tpe_invert;
58826+extern int grsec_enable_socket_all;
58827+extern int grsec_socket_all_gid;
58828+extern int grsec_enable_socket_client;
58829+extern int grsec_socket_client_gid;
58830+extern int grsec_enable_socket_server;
58831+extern int grsec_socket_server_gid;
58832+extern int grsec_audit_gid;
58833+extern int grsec_enable_group;
58834+extern int grsec_enable_audit_textrel;
58835+extern int grsec_enable_log_rwxmaps;
58836+extern int grsec_enable_mount;
58837+extern int grsec_enable_chdir;
58838+extern int grsec_resource_logging;
58839+extern int grsec_enable_blackhole;
58840+extern int grsec_lastack_retries;
58841+extern int grsec_enable_brute;
58842+extern int grsec_lock;
58843+
58844+extern spinlock_t grsec_alert_lock;
58845+extern unsigned long grsec_alert_wtime;
58846+extern unsigned long grsec_alert_fyet;
58847+
58848+extern spinlock_t grsec_audit_lock;
58849+
58850+extern rwlock_t grsec_exec_file_lock;
58851+
58852+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58853+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58854+ (tsk)->exec_file->f_vfsmnt) : "/")
58855+
58856+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58857+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58858+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58859+
58860+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58861+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58862+ (tsk)->exec_file->f_vfsmnt) : "/")
58863+
58864+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58865+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58866+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58867+
58868+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58869+
58870+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58871+
58872+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58873+ (task)->pid, (cred)->uid, \
58874+ (cred)->euid, (cred)->gid, (cred)->egid, \
58875+ gr_parent_task_fullpath(task), \
58876+ (task)->real_parent->comm, (task)->real_parent->pid, \
58877+ (pcred)->uid, (pcred)->euid, \
58878+ (pcred)->gid, (pcred)->egid
58879+
58880+#define GR_CHROOT_CAPS {{ \
58881+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58882+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58883+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58884+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58885+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58886+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
58887+
58888+#define security_learn(normal_msg,args...) \
58889+({ \
58890+ read_lock(&grsec_exec_file_lock); \
58891+ gr_add_learn_entry(normal_msg "\n", ## args); \
58892+ read_unlock(&grsec_exec_file_lock); \
58893+})
58894+
58895+enum {
58896+ GR_DO_AUDIT,
58897+ GR_DONT_AUDIT,
58898+ GR_DONT_AUDIT_GOOD
58899+};
58900+
58901+enum {
58902+ GR_TTYSNIFF,
58903+ GR_RBAC,
58904+ GR_RBAC_STR,
58905+ GR_STR_RBAC,
58906+ GR_RBAC_MODE2,
58907+ GR_RBAC_MODE3,
58908+ GR_FILENAME,
58909+ GR_SYSCTL_HIDDEN,
58910+ GR_NOARGS,
58911+ GR_ONE_INT,
58912+ GR_ONE_INT_TWO_STR,
58913+ GR_ONE_STR,
58914+ GR_STR_INT,
58915+ GR_TWO_STR_INT,
58916+ GR_TWO_INT,
58917+ GR_TWO_U64,
58918+ GR_THREE_INT,
58919+ GR_FIVE_INT_TWO_STR,
58920+ GR_TWO_STR,
58921+ GR_THREE_STR,
58922+ GR_FOUR_STR,
58923+ GR_STR_FILENAME,
58924+ GR_FILENAME_STR,
58925+ GR_FILENAME_TWO_INT,
58926+ GR_FILENAME_TWO_INT_STR,
58927+ GR_TEXTREL,
58928+ GR_PTRACE,
58929+ GR_RESOURCE,
58930+ GR_CAP,
58931+ GR_SIG,
58932+ GR_SIG2,
58933+ GR_CRASH1,
58934+ GR_CRASH2,
58935+ GR_PSACCT,
58936+ GR_RWXMAP
58937+};
58938+
58939+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58940+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58941+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58942+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58943+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58944+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58945+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58946+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58947+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58948+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58949+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58950+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58951+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58952+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58953+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58954+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58955+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58956+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58957+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58958+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58959+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58960+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58961+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58962+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58963+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58964+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58965+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58966+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58967+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58968+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58969+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58970+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58971+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58972+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58973+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58974+
58975+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58976+
58977+#endif
58978+
58979+#endif
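
Every gr_log_* wrapper above simply tags the shape of its arguments (GR_ONE_INT, GR_TWO_STR, GR_RBAC, ...) and forwards them to the single varargs sink gr_log_varargs(), which can then pull the right number and types of values off the list. Miniature userspace model of the pattern (not part of the patch; demo_* names are hypothetical):

#include <stdarg.h>
#include <stdio.h>

enum { DEMO_ONE_INT, DEMO_ONE_STR };

static void demo_log_varargs(const char *msg, int argtype, ...)
{
        va_list ap;

        va_start(ap, argtype);
        switch (argtype) {
        case DEMO_ONE_INT:
                printf(msg, va_arg(ap, int));
                break;
        case DEMO_ONE_STR:
                printf(msg, va_arg(ap, const char *));
                break;
        }
        va_end(ap);
}

#define demo_log_int(msg, num)  demo_log_varargs(msg, DEMO_ONE_INT, num)
#define demo_log_str(msg, str)  demo_log_varargs(msg, DEMO_ONE_STR, str)

int main(void)
{
        demo_log_int("limit exceeded: %d\n", 42);
        demo_log_str("exec denied: %s\n", "/tmp/foo");
        return 0;
}
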
58980diff -urNp linux-2.6.32.45/include/linux/grmsg.h linux-2.6.32.45/include/linux/grmsg.h
58981--- linux-2.6.32.45/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
58982+++ linux-2.6.32.45/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
58983@@ -0,0 +1,108 @@
58984+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
58985+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58986+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
58987+#define GR_STOPMOD_MSG "denied modification of module state by "
58988+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
58989+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58990+#define GR_IOPERM_MSG "denied use of ioperm() by "
58991+#define GR_IOPL_MSG "denied use of iopl() by "
58992+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
58993+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
58994+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
58995+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58996+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
58997+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
58998+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58999+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59000+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59001+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59002+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59003+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59004+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59005+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59006+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59007+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59008+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59009+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59010+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59011+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59012+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59013+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59014+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59015+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59016+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59017+#define GR_NPROC_MSG "denied overstep of process limit by "
59018+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59019+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
59020+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59021+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59022+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59023+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59024+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59025+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59026+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59027+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59028+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59029+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59030+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59031+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59032+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59033+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59034+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59035+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59036+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59037+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
59038+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59039+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59040+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59041+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59042+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59043+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59044+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59045+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59046+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59047+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59048+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59049+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59050+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59051+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59052+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59053+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59054+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59055+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59056+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59057+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59058+#define GR_NICE_CHROOT_MSG "denied priority change by "
59059+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59060+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59061+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59062+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59063+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59064+#define GR_TIME_MSG "time set by "
59065+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59066+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59067+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59068+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59069+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59070+#define GR_BIND_MSG "denied bind() by "
59071+#define GR_CONNECT_MSG "denied connect() by "
59072+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59073+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59074+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59075+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59076+#define GR_CAP_ACL_MSG "use of %s denied for "
59077+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59078+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59079+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59080+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59081+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59082+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59083+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59084+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59085+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59086+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59087+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59088+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59089+#define GR_VM86_MSG "denied use of vm86 by "
59090+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59091+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59092diff -urNp linux-2.6.32.45/include/linux/grsecurity.h linux-2.6.32.45/include/linux/grsecurity.h
59093--- linux-2.6.32.45/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
59094+++ linux-2.6.32.45/include/linux/grsecurity.h 2011-08-11 19:58:57.000000000 -0400
59095@@ -0,0 +1,217 @@
59096+#ifndef GR_SECURITY_H
59097+#define GR_SECURITY_H
59098+#include <linux/fs.h>
59099+#include <linux/fs_struct.h>
59100+#include <linux/binfmts.h>
59101+#include <linux/gracl.h>
59102+#include <linux/compat.h>
59103+
59104+/* notify of brain-dead configs */
59105+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59106+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59107+#endif
59108+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59109+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59110+#endif
59111+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59112+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59113+#endif
59114+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59115+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59116+#endif
59117+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59118+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59119+#endif
59120+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59121+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59122+#endif
59123+
59124+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59125+void gr_handle_brute_check(void);
59126+void gr_handle_kernel_exploit(void);
59127+int gr_process_user_ban(void);
59128+
59129+char gr_roletype_to_char(void);
59130+
59131+int gr_acl_enable_at_secure(void);
59132+
59133+int gr_check_user_change(int real, int effective, int fs);
59134+int gr_check_group_change(int real, int effective, int fs);
59135+
59136+void gr_del_task_from_ip_table(struct task_struct *p);
59137+
59138+int gr_pid_is_chrooted(struct task_struct *p);
59139+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59140+int gr_handle_chroot_nice(void);
59141+int gr_handle_chroot_sysctl(const int op);
59142+int gr_handle_chroot_setpriority(struct task_struct *p,
59143+ const int niceval);
59144+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59145+int gr_handle_chroot_chroot(const struct dentry *dentry,
59146+ const struct vfsmount *mnt);
59147+int gr_handle_chroot_caps(struct path *path);
59148+void gr_handle_chroot_chdir(struct path *path);
59149+int gr_handle_chroot_chmod(const struct dentry *dentry,
59150+ const struct vfsmount *mnt, const int mode);
59151+int gr_handle_chroot_mknod(const struct dentry *dentry,
59152+ const struct vfsmount *mnt, const int mode);
59153+int gr_handle_chroot_mount(const struct dentry *dentry,
59154+ const struct vfsmount *mnt,
59155+ const char *dev_name);
59156+int gr_handle_chroot_pivot(void);
59157+int gr_handle_chroot_unix(const pid_t pid);
59158+
59159+int gr_handle_rawio(const struct inode *inode);
59160+
59161+void gr_handle_ioperm(void);
59162+void gr_handle_iopl(void);
59163+
59164+int gr_tpe_allow(const struct file *file);
59165+
59166+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59167+void gr_clear_chroot_entries(struct task_struct *task);
59168+
59169+void gr_log_forkfail(const int retval);
59170+void gr_log_timechange(void);
59171+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59172+void gr_log_chdir(const struct dentry *dentry,
59173+ const struct vfsmount *mnt);
59174+void gr_log_chroot_exec(const struct dentry *dentry,
59175+ const struct vfsmount *mnt);
59176+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
59177+#ifdef CONFIG_COMPAT
59178+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
59179+#endif
59180+void gr_log_remount(const char *devname, const int retval);
59181+void gr_log_unmount(const char *devname, const int retval);
59182+void gr_log_mount(const char *from, const char *to, const int retval);
59183+void gr_log_textrel(struct vm_area_struct *vma);
59184+void gr_log_rwxmmap(struct file *file);
59185+void gr_log_rwxmprotect(struct file *file);
59186+
59187+int gr_handle_follow_link(const struct inode *parent,
59188+ const struct inode *inode,
59189+ const struct dentry *dentry,
59190+ const struct vfsmount *mnt);
59191+int gr_handle_fifo(const struct dentry *dentry,
59192+ const struct vfsmount *mnt,
59193+ const struct dentry *dir, const int flag,
59194+ const int acc_mode);
59195+int gr_handle_hardlink(const struct dentry *dentry,
59196+ const struct vfsmount *mnt,
59197+ struct inode *inode,
59198+ const int mode, const char *to);
59199+
59200+int gr_is_capable(const int cap);
59201+int gr_is_capable_nolog(const int cap);
59202+void gr_learn_resource(const struct task_struct *task, const int limit,
59203+ const unsigned long wanted, const int gt);
59204+void gr_copy_label(struct task_struct *tsk);
59205+void gr_handle_crash(struct task_struct *task, const int sig);
59206+int gr_handle_signal(const struct task_struct *p, const int sig);
59207+int gr_check_crash_uid(const uid_t uid);
59208+int gr_check_protected_task(const struct task_struct *task);
59209+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59210+int gr_acl_handle_mmap(const struct file *file,
59211+ const unsigned long prot);
59212+int gr_acl_handle_mprotect(const struct file *file,
59213+ const unsigned long prot);
59214+int gr_check_hidden_task(const struct task_struct *tsk);
59215+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59216+ const struct vfsmount *mnt);
59217+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59218+ const struct vfsmount *mnt);
59219+__u32 gr_acl_handle_access(const struct dentry *dentry,
59220+ const struct vfsmount *mnt, const int fmode);
59221+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59222+ const struct vfsmount *mnt, mode_t mode);
59223+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59224+ const struct vfsmount *mnt, mode_t mode);
59225+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59226+ const struct vfsmount *mnt);
59227+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59228+ const struct vfsmount *mnt);
59229+int gr_handle_ptrace(struct task_struct *task, const long request);
59230+int gr_handle_proc_ptrace(struct task_struct *task);
59231+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59232+ const struct vfsmount *mnt);
59233+int gr_check_crash_exec(const struct file *filp);
59234+int gr_acl_is_enabled(void);
59235+void gr_set_kernel_label(struct task_struct *task);
59236+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59237+ const gid_t gid);
59238+int gr_set_proc_label(const struct dentry *dentry,
59239+ const struct vfsmount *mnt,
59240+ const int unsafe_share);
59241+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59242+ const struct vfsmount *mnt);
59243+__u32 gr_acl_handle_open(const struct dentry *dentry,
59244+ const struct vfsmount *mnt, const int fmode);
59245+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59246+ const struct dentry *p_dentry,
59247+ const struct vfsmount *p_mnt, const int fmode,
59248+ const int imode);
59249+void gr_handle_create(const struct dentry *dentry,
59250+ const struct vfsmount *mnt);
59251+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59252+ const struct dentry *parent_dentry,
59253+ const struct vfsmount *parent_mnt,
59254+ const int mode);
59255+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59256+ const struct dentry *parent_dentry,
59257+ const struct vfsmount *parent_mnt);
59258+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59259+ const struct vfsmount *mnt);
59260+void gr_handle_delete(const ino_t ino, const dev_t dev);
59261+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59262+ const struct vfsmount *mnt);
59263+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59264+ const struct dentry *parent_dentry,
59265+ const struct vfsmount *parent_mnt,
59266+ const char *from);
59267+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59268+ const struct dentry *parent_dentry,
59269+ const struct vfsmount *parent_mnt,
59270+ const struct dentry *old_dentry,
59271+ const struct vfsmount *old_mnt, const char *to);
59272+int gr_acl_handle_rename(struct dentry *new_dentry,
59273+ struct dentry *parent_dentry,
59274+ const struct vfsmount *parent_mnt,
59275+ struct dentry *old_dentry,
59276+ struct inode *old_parent_inode,
59277+ struct vfsmount *old_mnt, const char *newname);
59278+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59279+ struct dentry *old_dentry,
59280+ struct dentry *new_dentry,
59281+ struct vfsmount *mnt, const __u8 replace);
59282+__u32 gr_check_link(const struct dentry *new_dentry,
59283+ const struct dentry *parent_dentry,
59284+ const struct vfsmount *parent_mnt,
59285+ const struct dentry *old_dentry,
59286+ const struct vfsmount *old_mnt);
59287+int gr_acl_handle_filldir(const struct file *file, const char *name,
59288+ const unsigned int namelen, const ino_t ino);
59289+
59290+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59291+ const struct vfsmount *mnt);
59292+void gr_acl_handle_exit(void);
59293+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59294+int gr_acl_handle_procpidmem(const struct task_struct *task);
59295+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59296+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59297+void gr_audit_ptrace(struct task_struct *task);
59298+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59299+
59300+#ifdef CONFIG_GRKERNSEC
59301+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59302+void gr_handle_vm86(void);
59303+void gr_handle_mem_readwrite(u64 from, u64 to);
59304+
59305+extern int grsec_enable_dmesg;
59306+extern int grsec_disable_privio;
59307+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59308+extern int grsec_enable_chroot_findtask;
59309+#endif
59310+#endif
59311+
59312+#endif
59313diff -urNp linux-2.6.32.45/include/linux/hdpu_features.h linux-2.6.32.45/include/linux/hdpu_features.h
59314--- linux-2.6.32.45/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59315+++ linux-2.6.32.45/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59316@@ -3,7 +3,7 @@
59317 struct cpustate_t {
59318 spinlock_t lock;
59319 int excl;
59320- int open_count;
59321+ atomic_t open_count;
59322 unsigned char cached_val;
59323 int inited;
59324 unsigned long *set_addr;
59325diff -urNp linux-2.6.32.45/include/linux/highmem.h linux-2.6.32.45/include/linux/highmem.h
59326--- linux-2.6.32.45/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59327+++ linux-2.6.32.45/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59328@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59329 kunmap_atomic(kaddr, KM_USER0);
59330 }
59331
59332+static inline void sanitize_highpage(struct page *page)
59333+{
59334+ void *kaddr;
59335+ unsigned long flags;
59336+
59337+ local_irq_save(flags);
59338+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59339+ clear_page(kaddr);
59340+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59341+ local_irq_restore(flags);
59342+}
59343+
59344 static inline void zero_user_segments(struct page *page,
59345 unsigned start1, unsigned end1,
59346 unsigned start2, unsigned end2)
59347diff -urNp linux-2.6.32.45/include/linux/i2o.h linux-2.6.32.45/include/linux/i2o.h
59348--- linux-2.6.32.45/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59349+++ linux-2.6.32.45/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59350@@ -564,7 +564,7 @@ struct i2o_controller {
59351 struct i2o_device *exec; /* Executive */
59352 #if BITS_PER_LONG == 64
59353 spinlock_t context_list_lock; /* lock for context_list */
59354- atomic_t context_list_counter; /* needed for unique contexts */
59355+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59356 struct list_head context_list; /* list of context id's
59357 and pointers */
59358 #endif
59359diff -urNp linux-2.6.32.45/include/linux/init_task.h linux-2.6.32.45/include/linux/init_task.h
59360--- linux-2.6.32.45/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59361+++ linux-2.6.32.45/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59362@@ -83,6 +83,12 @@ extern struct group_info init_groups;
59363 #define INIT_IDS
59364 #endif
59365
59366+#ifdef CONFIG_X86
59367+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59368+#else
59369+#define INIT_TASK_THREAD_INFO
59370+#endif
59371+
59372 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59373 /*
59374 * Because of the reduced scope of CAP_SETPCAP when filesystem
59375@@ -156,6 +162,7 @@ extern struct cred init_cred;
59376 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59377 .comm = "swapper", \
59378 .thread = INIT_THREAD, \
59379+ INIT_TASK_THREAD_INFO \
59380 .fs = &init_fs, \
59381 .files = &init_files, \
59382 .signal = &init_signals, \
59383diff -urNp linux-2.6.32.45/include/linux/intel-iommu.h linux-2.6.32.45/include/linux/intel-iommu.h
59384--- linux-2.6.32.45/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59385+++ linux-2.6.32.45/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59386@@ -296,7 +296,7 @@ struct iommu_flush {
59387 u8 fm, u64 type);
59388 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59389 unsigned int size_order, u64 type);
59390-};
59391+} __no_const;
59392
59393 enum {
59394 SR_DMAR_FECTL_REG,
59395diff -urNp linux-2.6.32.45/include/linux/interrupt.h linux-2.6.32.45/include/linux/interrupt.h
59396--- linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59397+++ linux-2.6.32.45/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59398@@ -363,7 +363,7 @@ enum
59399 /* map softirq index to softirq name. update 'softirq_to_name' in
59400 * kernel/softirq.c when adding a new softirq.
59401 */
59402-extern char *softirq_to_name[NR_SOFTIRQS];
59403+extern const char * const softirq_to_name[NR_SOFTIRQS];
59404
59405 /* softirq mask and active fields moved to irq_cpustat_t in
59406 * asm/hardirq.h to get better cache usage. KAO
59407@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59408
59409 struct softirq_action
59410 {
59411- void (*action)(struct softirq_action *);
59412+ void (*action)(void);
59413 };
59414
59415 asmlinkage void do_softirq(void);
59416 asmlinkage void __do_softirq(void);
59417-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59418+extern void open_softirq(int nr, void (*action)(void));
59419 extern void softirq_init(void);
59420 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59421 extern void raise_softirq_irqoff(unsigned int nr);
59422diff -urNp linux-2.6.32.45/include/linux/irq.h linux-2.6.32.45/include/linux/irq.h
59423--- linux-2.6.32.45/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59424+++ linux-2.6.32.45/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59425@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59426 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59427 bool boot)
59428 {
59429+#ifdef CONFIG_CPUMASK_OFFSTACK
59430 gfp_t gfp = GFP_ATOMIC;
59431
59432 if (boot)
59433 gfp = GFP_NOWAIT;
59434
59435-#ifdef CONFIG_CPUMASK_OFFSTACK
59436 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59437 return false;
59438
59439diff -urNp linux-2.6.32.45/include/linux/kallsyms.h linux-2.6.32.45/include/linux/kallsyms.h
59440--- linux-2.6.32.45/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59441+++ linux-2.6.32.45/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59442@@ -15,7 +15,8 @@
59443
59444 struct module;
59445
59446-#ifdef CONFIG_KALLSYMS
59447+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59448+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59449 /* Lookup the address for a symbol. Returns 0 if not found. */
59450 unsigned long kallsyms_lookup_name(const char *name);
59451
59452@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59453 /* Stupid that this does nothing, but I didn't create this mess. */
59454 #define __print_symbol(fmt, addr)
59455 #endif /*CONFIG_KALLSYMS*/
59456+#else /* when included by kallsyms.c, vsnprintf.c, or
59457+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59458+extern void __print_symbol(const char *fmt, unsigned long address);
59459+extern int sprint_symbol(char *buffer, unsigned long address);
59460+const char *kallsyms_lookup(unsigned long addr,
59461+ unsigned long *symbolsize,
59462+ unsigned long *offset,
59463+ char **modname, char *namebuf);
59464+#endif
59465
59466 /* This macro allows us to keep printk typechecking */
59467 static void __check_printsym_format(const char *fmt, ...)
59468diff -urNp linux-2.6.32.45/include/linux/kgdb.h linux-2.6.32.45/include/linux/kgdb.h
59469--- linux-2.6.32.45/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59470+++ linux-2.6.32.45/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59471@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59472
59473 extern int kgdb_connected;
59474
59475-extern atomic_t kgdb_setting_breakpoint;
59476-extern atomic_t kgdb_cpu_doing_single_step;
59477+extern atomic_unchecked_t kgdb_setting_breakpoint;
59478+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59479
59480 extern struct task_struct *kgdb_usethread;
59481 extern struct task_struct *kgdb_contthread;
59482@@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59483 * hardware debug registers.
59484 */
59485 struct kgdb_arch {
59486- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59487- unsigned long flags;
59488+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59489+ const unsigned long flags;
59490
59491 int (*set_breakpoint)(unsigned long, char *);
59492 int (*remove_breakpoint)(unsigned long, char *);
59493@@ -251,20 +251,20 @@ struct kgdb_arch {
59494 */
59495 struct kgdb_io {
59496 const char *name;
59497- int (*read_char) (void);
59498- void (*write_char) (u8);
59499- void (*flush) (void);
59500- int (*init) (void);
59501- void (*pre_exception) (void);
59502- void (*post_exception) (void);
59503+ int (* const read_char) (void);
59504+ void (* const write_char) (u8);
59505+ void (* const flush) (void);
59506+ int (* const init) (void);
59507+ void (* const pre_exception) (void);
59508+ void (* const post_exception) (void);
59509 };
59510
59511-extern struct kgdb_arch arch_kgdb_ops;
59512+extern const struct kgdb_arch arch_kgdb_ops;
59513
59514 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59515
59516-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59517-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59518+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59519+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59520
59521 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59522 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59523diff -urNp linux-2.6.32.45/include/linux/kmod.h linux-2.6.32.45/include/linux/kmod.h
59524--- linux-2.6.32.45/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59525+++ linux-2.6.32.45/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59526@@ -31,6 +31,8 @@
59527 * usually useless though. */
59528 extern int __request_module(bool wait, const char *name, ...) \
59529 __attribute__((format(printf, 2, 3)));
59530+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59531+ __attribute__((format(printf, 3, 4)));
59532 #define request_module(mod...) __request_module(true, mod)
59533 #define request_module_nowait(mod...) __request_module(false, mod)
59534 #define try_then_request_module(x, mod...) \
59535diff -urNp linux-2.6.32.45/include/linux/kobject.h linux-2.6.32.45/include/linux/kobject.h
59536--- linux-2.6.32.45/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59537+++ linux-2.6.32.45/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59538@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59539
59540 struct kobj_type {
59541 void (*release)(struct kobject *kobj);
59542- struct sysfs_ops *sysfs_ops;
59543+ const struct sysfs_ops *sysfs_ops;
59544 struct attribute **default_attrs;
59545 };
59546
59547@@ -118,9 +118,9 @@ struct kobj_uevent_env {
59548 };
59549
59550 struct kset_uevent_ops {
59551- int (*filter)(struct kset *kset, struct kobject *kobj);
59552- const char *(*name)(struct kset *kset, struct kobject *kobj);
59553- int (*uevent)(struct kset *kset, struct kobject *kobj,
59554+ int (* const filter)(struct kset *kset, struct kobject *kobj);
59555+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
59556+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
59557 struct kobj_uevent_env *env);
59558 };
59559
59560@@ -132,7 +132,7 @@ struct kobj_attribute {
59561 const char *buf, size_t count);
59562 };
59563
59564-extern struct sysfs_ops kobj_sysfs_ops;
59565+extern const struct sysfs_ops kobj_sysfs_ops;
59566
59567 /**
59568 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59569@@ -155,14 +155,14 @@ struct kset {
59570 struct list_head list;
59571 spinlock_t list_lock;
59572 struct kobject kobj;
59573- struct kset_uevent_ops *uevent_ops;
59574+ const struct kset_uevent_ops *uevent_ops;
59575 };
59576
59577 extern void kset_init(struct kset *kset);
59578 extern int __must_check kset_register(struct kset *kset);
59579 extern void kset_unregister(struct kset *kset);
59580 extern struct kset * __must_check kset_create_and_add(const char *name,
59581- struct kset_uevent_ops *u,
59582+ const struct kset_uevent_ops *u,
59583 struct kobject *parent_kobj);
59584
59585 static inline struct kset *to_kset(struct kobject *kobj)
59586diff -urNp linux-2.6.32.45/include/linux/kvm_host.h linux-2.6.32.45/include/linux/kvm_host.h
59587--- linux-2.6.32.45/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59588+++ linux-2.6.32.45/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59589@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59590 void vcpu_load(struct kvm_vcpu *vcpu);
59591 void vcpu_put(struct kvm_vcpu *vcpu);
59592
59593-int kvm_init(void *opaque, unsigned int vcpu_size,
59594+int kvm_init(const void *opaque, unsigned int vcpu_size,
59595 struct module *module);
59596 void kvm_exit(void);
59597
59598@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59599 struct kvm_guest_debug *dbg);
59600 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59601
59602-int kvm_arch_init(void *opaque);
59603+int kvm_arch_init(const void *opaque);
59604 void kvm_arch_exit(void);
59605
59606 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59607diff -urNp linux-2.6.32.45/include/linux/libata.h linux-2.6.32.45/include/linux/libata.h
59608--- linux-2.6.32.45/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59609+++ linux-2.6.32.45/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59610@@ -525,11 +525,11 @@ struct ata_ioports {
59611
59612 struct ata_host {
59613 spinlock_t lock;
59614- struct device *dev;
59615+ struct device *dev;
59616 void __iomem * const *iomap;
59617 unsigned int n_ports;
59618 void *private_data;
59619- struct ata_port_operations *ops;
59620+ const struct ata_port_operations *ops;
59621 unsigned long flags;
59622 #ifdef CONFIG_ATA_ACPI
59623 acpi_handle acpi_handle;
59624@@ -710,7 +710,7 @@ struct ata_link {
59625
59626 struct ata_port {
59627 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59628- struct ata_port_operations *ops;
59629+ const struct ata_port_operations *ops;
59630 spinlock_t *lock;
59631 /* Flags owned by the EH context. Only EH should touch these once the
59632 port is active */
59633@@ -883,7 +883,7 @@ struct ata_port_operations {
59634 * ->inherits must be the last field and all the preceding
59635 * fields must be pointers.
59636 */
59637- const struct ata_port_operations *inherits;
59638+ const struct ata_port_operations * const inherits;
59639 };
59640
59641 struct ata_port_info {
59642@@ -892,7 +892,7 @@ struct ata_port_info {
59643 unsigned long pio_mask;
59644 unsigned long mwdma_mask;
59645 unsigned long udma_mask;
59646- struct ata_port_operations *port_ops;
59647+ const struct ata_port_operations *port_ops;
59648 void *private_data;
59649 };
59650
59651@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59652 extern const unsigned long sata_deb_timing_hotplug[];
59653 extern const unsigned long sata_deb_timing_long[];
59654
59655-extern struct ata_port_operations ata_dummy_port_ops;
59656+extern const struct ata_port_operations ata_dummy_port_ops;
59657 extern const struct ata_port_info ata_dummy_port_info;
59658
59659 static inline const unsigned long *
59660@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59661 struct scsi_host_template *sht);
59662 extern void ata_host_detach(struct ata_host *host);
59663 extern void ata_host_init(struct ata_host *, struct device *,
59664- unsigned long, struct ata_port_operations *);
59665+ unsigned long, const struct ata_port_operations *);
59666 extern int ata_scsi_detect(struct scsi_host_template *sht);
59667 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59668 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59669diff -urNp linux-2.6.32.45/include/linux/lockd/bind.h linux-2.6.32.45/include/linux/lockd/bind.h
59670--- linux-2.6.32.45/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59671+++ linux-2.6.32.45/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59672@@ -23,13 +23,13 @@ struct svc_rqst;
59673 * This is the set of functions for lockd->nfsd communication
59674 */
59675 struct nlmsvc_binding {
59676- __be32 (*fopen)(struct svc_rqst *,
59677+ __be32 (* const fopen)(struct svc_rqst *,
59678 struct nfs_fh *,
59679 struct file **);
59680- void (*fclose)(struct file *);
59681+ void (* const fclose)(struct file *);
59682 };
59683
59684-extern struct nlmsvc_binding * nlmsvc_ops;
59685+extern const struct nlmsvc_binding * nlmsvc_ops;
59686
59687 /*
59688 * Similar to nfs_client_initdata, but without the NFS-specific
59689diff -urNp linux-2.6.32.45/include/linux/mca.h linux-2.6.32.45/include/linux/mca.h
59690--- linux-2.6.32.45/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
59691+++ linux-2.6.32.45/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
59692@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59693 int region);
59694 void * (*mca_transform_memory)(struct mca_device *,
59695 void *memory);
59696-};
59697+} __no_const;
59698
59699 struct mca_bus {
59700 u64 default_dma_mask;
59701diff -urNp linux-2.6.32.45/include/linux/memory.h linux-2.6.32.45/include/linux/memory.h
59702--- linux-2.6.32.45/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
59703+++ linux-2.6.32.45/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
59704@@ -108,7 +108,7 @@ struct memory_accessor {
59705 size_t count);
59706 ssize_t (*write)(struct memory_accessor *, const char *buf,
59707 off_t offset, size_t count);
59708-};
59709+} __no_const;
59710
59711 /*
59712 * Kernel text modification mutex, used for code patching. Users of this lock
59713diff -urNp linux-2.6.32.45/include/linux/mm.h linux-2.6.32.45/include/linux/mm.h
59714--- linux-2.6.32.45/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
59715+++ linux-2.6.32.45/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
59716@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
59717
59718 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59719 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59720+
59721+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59722+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59723+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59724+#else
59725 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59726+#endif
59727+
59728 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59729 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59730
59731@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
59732 int set_page_dirty_lock(struct page *page);
59733 int clear_page_dirty_for_io(struct page *page);
59734
59735-/* Is the vma a continuation of the stack vma above it? */
59736-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
59737-{
59738- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59739-}
59740-
59741 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59742 unsigned long old_addr, struct vm_area_struct *new_vma,
59743 unsigned long new_addr, unsigned long len);
59744@@ -890,6 +891,8 @@ struct shrinker {
59745 extern void register_shrinker(struct shrinker *);
59746 extern void unregister_shrinker(struct shrinker *);
59747
59748+pgprot_t vm_get_page_prot(unsigned long vm_flags);
59749+
59750 int vma_wants_writenotify(struct vm_area_struct *vma);
59751
59752 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
59753@@ -1162,6 +1165,7 @@ out:
59754 }
59755
59756 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59757+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59758
59759 extern unsigned long do_brk(unsigned long, unsigned long);
59760
59761@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
59762 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59763 struct vm_area_struct **pprev);
59764
59765+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59766+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59767+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59768+
59769 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59770 NULL if none. Assume start_addr < end_addr. */
59771 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59772@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
59773 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59774 }
59775
59776-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59777 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59778 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59779 unsigned long pfn, unsigned long size, pgprot_t);
59780@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
59781 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
59782 extern int sysctl_memory_failure_early_kill;
59783 extern int sysctl_memory_failure_recovery;
59784-extern atomic_long_t mce_bad_pages;
59785+extern atomic_long_unchecked_t mce_bad_pages;
59786+
59787+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59788+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59789+#else
59790+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59791+#endif
59792
59793 #endif /* __KERNEL__ */
59794 #endif /* _LINUX_MM_H */
59795diff -urNp linux-2.6.32.45/include/linux/mm_types.h linux-2.6.32.45/include/linux/mm_types.h
59796--- linux-2.6.32.45/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
59797+++ linux-2.6.32.45/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
59798@@ -186,6 +186,8 @@ struct vm_area_struct {
59799 #ifdef CONFIG_NUMA
59800 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59801 #endif
59802+
59803+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59804 };
59805
59806 struct core_thread {
59807@@ -287,6 +289,24 @@ struct mm_struct {
59808 #ifdef CONFIG_MMU_NOTIFIER
59809 struct mmu_notifier_mm *mmu_notifier_mm;
59810 #endif
59811+
59812+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59813+ unsigned long pax_flags;
59814+#endif
59815+
59816+#ifdef CONFIG_PAX_DLRESOLVE
59817+ unsigned long call_dl_resolve;
59818+#endif
59819+
59820+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59821+ unsigned long call_syscall;
59822+#endif
59823+
59824+#ifdef CONFIG_PAX_ASLR
59825+ unsigned long delta_mmap; /* randomized offset */
59826+ unsigned long delta_stack; /* randomized offset */
59827+#endif
59828+
59829 };
59830
59831 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
59832diff -urNp linux-2.6.32.45/include/linux/mmu_notifier.h linux-2.6.32.45/include/linux/mmu_notifier.h
59833--- linux-2.6.32.45/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
59834+++ linux-2.6.32.45/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
59835@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
59836 */
59837 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59838 ({ \
59839- pte_t __pte; \
59840+ pte_t ___pte; \
59841 struct vm_area_struct *___vma = __vma; \
59842 unsigned long ___address = __address; \
59843- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59844+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59845 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59846- __pte; \
59847+ ___pte; \
59848 })
59849
59850 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
59851diff -urNp linux-2.6.32.45/include/linux/mmzone.h linux-2.6.32.45/include/linux/mmzone.h
59852--- linux-2.6.32.45/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
59853+++ linux-2.6.32.45/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
59854@@ -350,7 +350,7 @@ struct zone {
59855 unsigned long flags; /* zone flags, see below */
59856
59857 /* Zone statistics */
59858- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59859+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59860
59861 /*
59862 * prev_priority holds the scanning priority for this zone. It is
59863diff -urNp linux-2.6.32.45/include/linux/mod_devicetable.h linux-2.6.32.45/include/linux/mod_devicetable.h
59864--- linux-2.6.32.45/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
59865+++ linux-2.6.32.45/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
59866@@ -12,7 +12,7 @@
59867 typedef unsigned long kernel_ulong_t;
59868 #endif
59869
59870-#define PCI_ANY_ID (~0)
59871+#define PCI_ANY_ID ((__u16)~0)
59872
59873 struct pci_device_id {
59874 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59875@@ -131,7 +131,7 @@ struct usb_device_id {
59876 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59877 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59878
59879-#define HID_ANY_ID (~0)
59880+#define HID_ANY_ID (~0U)
59881
59882 struct hid_device_id {
59883 __u16 bus;
59884diff -urNp linux-2.6.32.45/include/linux/module.h linux-2.6.32.45/include/linux/module.h
59885--- linux-2.6.32.45/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
59886+++ linux-2.6.32.45/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
59887@@ -16,6 +16,7 @@
59888 #include <linux/kobject.h>
59889 #include <linux/moduleparam.h>
59890 #include <linux/tracepoint.h>
59891+#include <linux/fs.h>
59892
59893 #include <asm/local.h>
59894 #include <asm/module.h>
59895@@ -287,16 +288,16 @@ struct module
59896 int (*init)(void);
59897
59898 /* If this is non-NULL, vfree after init() returns */
59899- void *module_init;
59900+ void *module_init_rx, *module_init_rw;
59901
59902 /* Here is the actual code + data, vfree'd on unload. */
59903- void *module_core;
59904+ void *module_core_rx, *module_core_rw;
59905
59906 /* Here are the sizes of the init and core sections */
59907- unsigned int init_size, core_size;
59908+ unsigned int init_size_rw, core_size_rw;
59909
59910 /* The size of the executable code in each section. */
59911- unsigned int init_text_size, core_text_size;
59912+ unsigned int init_size_rx, core_size_rx;
59913
59914 /* Arch-specific module values */
59915 struct mod_arch_specific arch;
59916@@ -345,6 +346,10 @@ struct module
59917 #ifdef CONFIG_EVENT_TRACING
59918 struct ftrace_event_call *trace_events;
59919 unsigned int num_trace_events;
59920+ struct file_operations trace_id;
59921+ struct file_operations trace_enable;
59922+ struct file_operations trace_format;
59923+ struct file_operations trace_filter;
59924 #endif
59925 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59926 unsigned long *ftrace_callsites;
59927@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
59928 bool is_module_address(unsigned long addr);
59929 bool is_module_text_address(unsigned long addr);
59930
59931+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59932+{
59933+
59934+#ifdef CONFIG_PAX_KERNEXEC
59935+ if (ktla_ktva(addr) >= (unsigned long)start &&
59936+ ktla_ktva(addr) < (unsigned long)start + size)
59937+ return 1;
59938+#endif
59939+
59940+ return ((void *)addr >= start && (void *)addr < start + size);
59941+}
59942+
59943+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59944+{
59945+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59946+}
59947+
59948+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59949+{
59950+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59951+}
59952+
59953+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59954+{
59955+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59956+}
59957+
59958+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59959+{
59960+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59961+}
59962+
59963 static inline int within_module_core(unsigned long addr, struct module *mod)
59964 {
59965- return (unsigned long)mod->module_core <= addr &&
59966- addr < (unsigned long)mod->module_core + mod->core_size;
59967+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59968 }
59969
59970 static inline int within_module_init(unsigned long addr, struct module *mod)
59971 {
59972- return (unsigned long)mod->module_init <= addr &&
59973- addr < (unsigned long)mod->module_init + mod->init_size;
59974+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59975 }
59976
59977 /* Search for module by name: must hold module_mutex. */
59978diff -urNp linux-2.6.32.45/include/linux/moduleloader.h linux-2.6.32.45/include/linux/moduleloader.h
59979--- linux-2.6.32.45/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
59980+++ linux-2.6.32.45/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
59981@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
59982 sections. Returns NULL on failure. */
59983 void *module_alloc(unsigned long size);
59984
59985+#ifdef CONFIG_PAX_KERNEXEC
59986+void *module_alloc_exec(unsigned long size);
59987+#else
59988+#define module_alloc_exec(x) module_alloc(x)
59989+#endif
59990+
59991 /* Free memory returned from module_alloc. */
59992 void module_free(struct module *mod, void *module_region);
59993
59994+#ifdef CONFIG_PAX_KERNEXEC
59995+void module_free_exec(struct module *mod, void *module_region);
59996+#else
59997+#define module_free_exec(x, y) module_free((x), (y))
59998+#endif
59999+
60000 /* Apply the given relocation to the (simplified) ELF. Return -error
60001 or 0. */
60002 int apply_relocate(Elf_Shdr *sechdrs,
60003diff -urNp linux-2.6.32.45/include/linux/moduleparam.h linux-2.6.32.45/include/linux/moduleparam.h
60004--- linux-2.6.32.45/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
60005+++ linux-2.6.32.45/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
60006@@ -132,7 +132,7 @@ struct kparam_array
60007
60008 /* Actually copy string: maxlen param is usually sizeof(string). */
60009 #define module_param_string(name, string, len, perm) \
60010- static const struct kparam_string __param_string_##name \
60011+ static const struct kparam_string __param_string_##name __used \
60012 = { len, string }; \
60013 __module_param_call(MODULE_PARAM_PREFIX, name, \
60014 param_set_copystring, param_get_string, \
60015@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
60016
60017 /* Comma-separated array: *nump is set to number they actually specified. */
60018 #define module_param_array_named(name, array, type, nump, perm) \
60019- static const struct kparam_array __param_arr_##name \
60020+ static const struct kparam_array __param_arr_##name __used \
60021 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
60022 sizeof(array[0]), array }; \
60023 __module_param_call(MODULE_PARAM_PREFIX, name, \
60024diff -urNp linux-2.6.32.45/include/linux/mutex.h linux-2.6.32.45/include/linux/mutex.h
60025--- linux-2.6.32.45/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
60026+++ linux-2.6.32.45/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
60027@@ -51,7 +51,7 @@ struct mutex {
60028 spinlock_t wait_lock;
60029 struct list_head wait_list;
60030 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
60031- struct thread_info *owner;
60032+ struct task_struct *owner;
60033 #endif
60034 #ifdef CONFIG_DEBUG_MUTEXES
60035 const char *name;
60036diff -urNp linux-2.6.32.45/include/linux/namei.h linux-2.6.32.45/include/linux/namei.h
60037--- linux-2.6.32.45/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
60038+++ linux-2.6.32.45/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
60039@@ -22,7 +22,7 @@ struct nameidata {
60040 unsigned int flags;
60041 int last_type;
60042 unsigned depth;
60043- char *saved_names[MAX_NESTED_LINKS + 1];
60044+ const char *saved_names[MAX_NESTED_LINKS + 1];
60045
60046 /* Intent data */
60047 union {
60048@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
60049 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60050 extern void unlock_rename(struct dentry *, struct dentry *);
60051
60052-static inline void nd_set_link(struct nameidata *nd, char *path)
60053+static inline void nd_set_link(struct nameidata *nd, const char *path)
60054 {
60055 nd->saved_names[nd->depth] = path;
60056 }
60057
60058-static inline char *nd_get_link(struct nameidata *nd)
60059+static inline const char *nd_get_link(const struct nameidata *nd)
60060 {
60061 return nd->saved_names[nd->depth];
60062 }
60063diff -urNp linux-2.6.32.45/include/linux/netfilter/xt_gradm.h linux-2.6.32.45/include/linux/netfilter/xt_gradm.h
60064--- linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
60065+++ linux-2.6.32.45/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
60066@@ -0,0 +1,9 @@
60067+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60068+#define _LINUX_NETFILTER_XT_GRADM_H 1
60069+
60070+struct xt_gradm_mtinfo {
60071+ __u16 flags;
60072+ __u16 invflags;
60073+};
60074+
60075+#endif
60076diff -urNp linux-2.6.32.45/include/linux/nodemask.h linux-2.6.32.45/include/linux/nodemask.h
60077--- linux-2.6.32.45/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
60078+++ linux-2.6.32.45/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
60079@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
60080
60081 #define any_online_node(mask) \
60082 ({ \
60083- int node; \
60084- for_each_node_mask(node, (mask)) \
60085- if (node_online(node)) \
60086+ int __node; \
60087+ for_each_node_mask(__node, (mask)) \
60088+ if (node_online(__node)) \
60089 break; \
60090- node; \
60091+ __node; \
60092 })
60093
60094 #define num_online_nodes() num_node_state(N_ONLINE)
60095diff -urNp linux-2.6.32.45/include/linux/oprofile.h linux-2.6.32.45/include/linux/oprofile.h
60096--- linux-2.6.32.45/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
60097+++ linux-2.6.32.45/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
60098@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
60099 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60100 char const * name, ulong * val);
60101
60102-/** Create a file for read-only access to an atomic_t. */
60103+/** Create a file for read-only access to an atomic_unchecked_t. */
60104 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60105- char const * name, atomic_t * val);
60106+ char const * name, atomic_unchecked_t * val);
60107
60108 /** create a directory */
60109 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60110diff -urNp linux-2.6.32.45/include/linux/pagemap.h linux-2.6.32.45/include/linux/pagemap.h
60111--- linux-2.6.32.45/include/linux/pagemap.h 2011-03-27 14:31:47.000000000 -0400
60112+++ linux-2.6.32.45/include/linux/pagemap.h 2011-08-17 19:36:28.000000000 -0400
60113@@ -425,6 +425,7 @@ static inline int fault_in_pages_readabl
60114 if (((unsigned long)uaddr & PAGE_MASK) !=
60115 ((unsigned long)end & PAGE_MASK))
60116 ret = __get_user(c, end);
60117+ (void)c;
60118 }
60119 return ret;
60120 }
60121diff -urNp linux-2.6.32.45/include/linux/perf_event.h linux-2.6.32.45/include/linux/perf_event.h
60122--- linux-2.6.32.45/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
60123+++ linux-2.6.32.45/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
60124@@ -476,7 +476,7 @@ struct hw_perf_event {
60125 struct hrtimer hrtimer;
60126 };
60127 };
60128- atomic64_t prev_count;
60129+ atomic64_unchecked_t prev_count;
60130 u64 sample_period;
60131 u64 last_period;
60132 atomic64_t period_left;
60133@@ -557,7 +557,7 @@ struct perf_event {
60134 const struct pmu *pmu;
60135
60136 enum perf_event_active_state state;
60137- atomic64_t count;
60138+ atomic64_unchecked_t count;
60139
60140 /*
60141 * These are the total time in nanoseconds that the event
60142@@ -595,8 +595,8 @@ struct perf_event {
60143 * These accumulate total time (in nanoseconds) that children
60144 * events have been enabled and running, respectively.
60145 */
60146- atomic64_t child_total_time_enabled;
60147- atomic64_t child_total_time_running;
60148+ atomic64_unchecked_t child_total_time_enabled;
60149+ atomic64_unchecked_t child_total_time_running;
60150
60151 /*
60152 * Protect attach/detach and child_list:
60153diff -urNp linux-2.6.32.45/include/linux/pipe_fs_i.h linux-2.6.32.45/include/linux/pipe_fs_i.h
60154--- linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
60155+++ linux-2.6.32.45/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
60156@@ -46,9 +46,9 @@ struct pipe_inode_info {
60157 wait_queue_head_t wait;
60158 unsigned int nrbufs, curbuf;
60159 struct page *tmp_page;
60160- unsigned int readers;
60161- unsigned int writers;
60162- unsigned int waiting_writers;
60163+ atomic_t readers;
60164+ atomic_t writers;
60165+ atomic_t waiting_writers;
60166 unsigned int r_counter;
60167 unsigned int w_counter;
60168 struct fasync_struct *fasync_readers;
60169diff -urNp linux-2.6.32.45/include/linux/poison.h linux-2.6.32.45/include/linux/poison.h
60170--- linux-2.6.32.45/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
60171+++ linux-2.6.32.45/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
60172@@ -19,8 +19,8 @@
60173 * under normal circumstances, used to verify that nobody uses
60174 * non-initialized list entries.
60175 */
60176-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60177-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60178+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60179+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60180
60181 /********** include/linux/timer.h **********/
60182 /*
60183diff -urNp linux-2.6.32.45/include/linux/posix-timers.h linux-2.6.32.45/include/linux/posix-timers.h
60184--- linux-2.6.32.45/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
60185+++ linux-2.6.32.45/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
60186@@ -67,7 +67,7 @@ struct k_itimer {
60187 };
60188
60189 struct k_clock {
60190- int res; /* in nanoseconds */
60191+ const int res; /* in nanoseconds */
60192 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
60193 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
60194 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
60195diff -urNp linux-2.6.32.45/include/linux/preempt.h linux-2.6.32.45/include/linux/preempt.h
60196--- linux-2.6.32.45/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
60197+++ linux-2.6.32.45/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
60198@@ -110,7 +110,7 @@ struct preempt_ops {
60199 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60200 void (*sched_out)(struct preempt_notifier *notifier,
60201 struct task_struct *next);
60202-};
60203+} __no_const;
60204
60205 /**
60206 * preempt_notifier - key for installing preemption notifiers
60207diff -urNp linux-2.6.32.45/include/linux/proc_fs.h linux-2.6.32.45/include/linux/proc_fs.h
60208--- linux-2.6.32.45/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
60209+++ linux-2.6.32.45/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
60210@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60211 return proc_create_data(name, mode, parent, proc_fops, NULL);
60212 }
60213
60214+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60215+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60216+{
60217+#ifdef CONFIG_GRKERNSEC_PROC_USER
60218+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60219+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60220+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60221+#else
60222+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60223+#endif
60224+}
60225+
60226+
60227 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60228 mode_t mode, struct proc_dir_entry *base,
60229 read_proc_t *read_proc, void * data)
60230@@ -256,7 +269,7 @@ union proc_op {
60231 int (*proc_show)(struct seq_file *m,
60232 struct pid_namespace *ns, struct pid *pid,
60233 struct task_struct *task);
60234-};
60235+} __no_const;
60236
60237 struct ctl_table_header;
60238 struct ctl_table;
60239diff -urNp linux-2.6.32.45/include/linux/ptrace.h linux-2.6.32.45/include/linux/ptrace.h
60240--- linux-2.6.32.45/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60241+++ linux-2.6.32.45/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60242@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60243 extern void exit_ptrace(struct task_struct *tracer);
60244 #define PTRACE_MODE_READ 1
60245 #define PTRACE_MODE_ATTACH 2
60246-/* Returns 0 on success, -errno on denial. */
60247-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60248 /* Returns true on success, false on denial. */
60249 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60250+/* Returns true on success, false on denial. */
60251+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60252
60253 static inline int ptrace_reparented(struct task_struct *child)
60254 {
60255diff -urNp linux-2.6.32.45/include/linux/random.h linux-2.6.32.45/include/linux/random.h
60256--- linux-2.6.32.45/include/linux/random.h 2011-08-16 20:37:25.000000000 -0400
60257+++ linux-2.6.32.45/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60258@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned l
60259 u32 random32(void);
60260 void srandom32(u32 seed);
60261
60262+static inline unsigned long pax_get_random_long(void)
60263+{
60264+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60265+}
60266+
60267 #endif /* __KERNEL___ */
60268
60269 #endif /* _LINUX_RANDOM_H */
60270diff -urNp linux-2.6.32.45/include/linux/reboot.h linux-2.6.32.45/include/linux/reboot.h
60271--- linux-2.6.32.45/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60272+++ linux-2.6.32.45/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60273@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60274 * Architecture-specific implementations of sys_reboot commands.
60275 */
60276
60277-extern void machine_restart(char *cmd);
60278-extern void machine_halt(void);
60279-extern void machine_power_off(void);
60280+extern void machine_restart(char *cmd) __noreturn;
60281+extern void machine_halt(void) __noreturn;
60282+extern void machine_power_off(void) __noreturn;
60283
60284 extern void machine_shutdown(void);
60285 struct pt_regs;
60286@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60287 */
60288
60289 extern void kernel_restart_prepare(char *cmd);
60290-extern void kernel_restart(char *cmd);
60291-extern void kernel_halt(void);
60292-extern void kernel_power_off(void);
60293+extern void kernel_restart(char *cmd) __noreturn;
60294+extern void kernel_halt(void) __noreturn;
60295+extern void kernel_power_off(void) __noreturn;
60296
60297 void ctrl_alt_del(void);
60298
60299@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60300 * Emergency restart, callable from an interrupt handler.
60301 */
60302
60303-extern void emergency_restart(void);
60304+extern void emergency_restart(void) __noreturn;
60305 #include <asm/emergency-restart.h>
60306
60307 #endif
60308diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs.h linux-2.6.32.45/include/linux/reiserfs_fs.h
60309--- linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60310+++ linux-2.6.32.45/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60311@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60312 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60313
60314 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60315-#define get_generation(s) atomic_read (&fs_generation(s))
60316+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60317 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60318 #define __fs_changed(gen,s) (gen != get_generation (s))
60319 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60320@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60321 */
60322
60323 struct item_operations {
60324- int (*bytes_number) (struct item_head * ih, int block_size);
60325- void (*decrement_key) (struct cpu_key *);
60326- int (*is_left_mergeable) (struct reiserfs_key * ih,
60327+ int (* const bytes_number) (struct item_head * ih, int block_size);
60328+ void (* const decrement_key) (struct cpu_key *);
60329+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
60330 unsigned long bsize);
60331- void (*print_item) (struct item_head *, char *item);
60332- void (*check_item) (struct item_head *, char *item);
60333+ void (* const print_item) (struct item_head *, char *item);
60334+ void (* const check_item) (struct item_head *, char *item);
60335
60336- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60337+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60338 int is_affected, int insert_size);
60339- int (*check_left) (struct virtual_item * vi, int free,
60340+ int (* const check_left) (struct virtual_item * vi, int free,
60341 int start_skip, int end_skip);
60342- int (*check_right) (struct virtual_item * vi, int free);
60343- int (*part_size) (struct virtual_item * vi, int from, int to);
60344- int (*unit_num) (struct virtual_item * vi);
60345- void (*print_vi) (struct virtual_item * vi);
60346+ int (* const check_right) (struct virtual_item * vi, int free);
60347+ int (* const part_size) (struct virtual_item * vi, int from, int to);
60348+ int (* const unit_num) (struct virtual_item * vi);
60349+ void (* const print_vi) (struct virtual_item * vi);
60350 };
60351
60352-extern struct item_operations *item_ops[TYPE_ANY + 1];
60353+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60354
60355 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60356 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60357diff -urNp linux-2.6.32.45/include/linux/reiserfs_fs_sb.h linux-2.6.32.45/include/linux/reiserfs_fs_sb.h
60358--- linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60359+++ linux-2.6.32.45/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60360@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60361 /* Comment? -Hans */
60362 wait_queue_head_t s_wait;
60363 /* To be obsoleted soon by per buffer seals.. -Hans */
60364- atomic_t s_generation_counter; // increased by one every time the
60365+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60366 // tree gets re-balanced
60367 unsigned long s_properties; /* File system properties. Currently holds
60368 on-disk FS format */
60369diff -urNp linux-2.6.32.45/include/linux/relay.h linux-2.6.32.45/include/linux/relay.h
60370--- linux-2.6.32.45/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60371+++ linux-2.6.32.45/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60372@@ -159,7 +159,7 @@ struct rchan_callbacks
60373 * The callback should return 0 if successful, negative if not.
60374 */
60375 int (*remove_buf_file)(struct dentry *dentry);
60376-};
60377+} __no_const;
60378
60379 /*
60380 * CONFIG_RELAY kernel API, kernel/relay.c
60381diff -urNp linux-2.6.32.45/include/linux/sched.h linux-2.6.32.45/include/linux/sched.h
60382--- linux-2.6.32.45/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60383+++ linux-2.6.32.45/include/linux/sched.h 2011-08-11 19:48:55.000000000 -0400
60384@@ -101,6 +101,7 @@ struct bio;
60385 struct fs_struct;
60386 struct bts_context;
60387 struct perf_event_context;
60388+struct linux_binprm;
60389
60390 /*
60391 * List of flags we want to share for kernel threads,
60392@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60393 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60394 asmlinkage void __schedule(void);
60395 asmlinkage void schedule(void);
60396-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60397+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60398
60399 struct nsproxy;
60400 struct user_namespace;
60401@@ -371,9 +372,12 @@ struct user_namespace;
60402 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60403
60404 extern int sysctl_max_map_count;
60405+extern unsigned long sysctl_heap_stack_gap;
60406
60407 #include <linux/aio.h>
60408
60409+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60410+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60411 extern unsigned long
60412 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60413 unsigned long, unsigned long);
60414@@ -666,6 +670,16 @@ struct signal_struct {
60415 struct tty_audit_buf *tty_audit_buf;
60416 #endif
60417
60418+#ifdef CONFIG_GRKERNSEC
60419+ u32 curr_ip;
60420+ u32 saved_ip;
60421+ u32 gr_saddr;
60422+ u32 gr_daddr;
60423+ u16 gr_sport;
60424+ u16 gr_dport;
60425+ u8 used_accept:1;
60426+#endif
60427+
60428 int oom_adj; /* OOM kill score adjustment (bit shift) */
60429 };
60430
60431@@ -723,6 +737,11 @@ struct user_struct {
60432 struct key *session_keyring; /* UID's default session keyring */
60433 #endif
60434
60435+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60436+ unsigned int banned;
60437+ unsigned long ban_expires;
60438+#endif
60439+
60440 /* Hash table maintenance information */
60441 struct hlist_node uidhash_node;
60442 uid_t uid;
60443@@ -1328,8 +1347,8 @@ struct task_struct {
60444 struct list_head thread_group;
60445
60446 struct completion *vfork_done; /* for vfork() */
60447- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60448- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60449+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60450+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60451
60452 cputime_t utime, stime, utimescaled, stimescaled;
60453 cputime_t gtime;
60454@@ -1343,16 +1362,6 @@ struct task_struct {
60455 struct task_cputime cputime_expires;
60456 struct list_head cpu_timers[3];
60457
60458-/* process credentials */
60459- const struct cred *real_cred; /* objective and real subjective task
60460- * credentials (COW) */
60461- const struct cred *cred; /* effective (overridable) subjective task
60462- * credentials (COW) */
60463- struct mutex cred_guard_mutex; /* guard against foreign influences on
60464- * credential calculations
60465- * (notably. ptrace) */
60466- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60467-
60468 char comm[TASK_COMM_LEN]; /* executable name excluding path
60469 - access with [gs]et_task_comm (which lock
60470 it with task_lock())
60471@@ -1369,6 +1378,10 @@ struct task_struct {
60472 #endif
60473 /* CPU-specific state of this task */
60474 struct thread_struct thread;
60475+/* thread_info moved to task_struct */
60476+#ifdef CONFIG_X86
60477+ struct thread_info tinfo;
60478+#endif
60479 /* filesystem information */
60480 struct fs_struct *fs;
60481 /* open file information */
60482@@ -1436,6 +1449,15 @@ struct task_struct {
60483 int hardirq_context;
60484 int softirq_context;
60485 #endif
60486+
60487+/* process credentials */
60488+ const struct cred *real_cred; /* objective and real subjective task
60489+ * credentials (COW) */
60490+ struct mutex cred_guard_mutex; /* guard against foreign influences on
60491+ * credential calculations
60492+ * (notably. ptrace) */
60493+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60494+
60495 #ifdef CONFIG_LOCKDEP
60496 # define MAX_LOCK_DEPTH 48UL
60497 u64 curr_chain_key;
60498@@ -1456,6 +1478,9 @@ struct task_struct {
60499
60500 struct backing_dev_info *backing_dev_info;
60501
60502+ const struct cred *cred; /* effective (overridable) subjective task
60503+ * credentials (COW) */
60504+
60505 struct io_context *io_context;
60506
60507 unsigned long ptrace_message;
60508@@ -1519,6 +1544,21 @@ struct task_struct {
60509 unsigned long default_timer_slack_ns;
60510
60511 struct list_head *scm_work_list;
60512+
60513+#ifdef CONFIG_GRKERNSEC
60514+ /* grsecurity */
60515+ struct dentry *gr_chroot_dentry;
60516+ struct acl_subject_label *acl;
60517+ struct acl_role_label *role;
60518+ struct file *exec_file;
60519+ u16 acl_role_id;
60520+ /* is this the task that authenticated to the special role */
60521+ u8 acl_sp_role;
60522+ u8 is_writable;
60523+ u8 brute;
60524+ u8 gr_is_chrooted;
60525+#endif
60526+
60527 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60528 /* Index of current stored adress in ret_stack */
60529 int curr_ret_stack;
60530@@ -1542,6 +1582,57 @@ struct task_struct {
60531 #endif /* CONFIG_TRACING */
60532 };
60533
60534+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60535+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60536+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60537+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60538+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60539+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60540+
60541+#ifdef CONFIG_PAX_SOFTMODE
60542+extern int pax_softmode;
60543+#endif
60544+
60545+extern int pax_check_flags(unsigned long *);
60546+
60547+/* if tsk != current then task_lock must be held on it */
60548+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60549+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60550+{
60551+ if (likely(tsk->mm))
60552+ return tsk->mm->pax_flags;
60553+ else
60554+ return 0UL;
60555+}
60556+
60557+/* if tsk != current then task_lock must be held on it */
60558+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60559+{
60560+ if (likely(tsk->mm)) {
60561+ tsk->mm->pax_flags = flags;
60562+ return 0;
60563+ }
60564+ return -EINVAL;
60565+}
60566+#endif
60567+
60568+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60569+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60570+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60571+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60572+#endif
60573+
60574+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60575+extern void pax_report_insns(void *pc, void *sp);
60576+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60577+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60578+
60579+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60580+extern void pax_track_stack(void);
60581+#else
60582+static inline void pax_track_stack(void) {}
60583+#endif
60584+
60585 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60586 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60587
60588@@ -1740,7 +1831,7 @@ extern void thread_group_times(struct ta
60589 #define PF_DUMPCORE 0x00000200 /* dumped core */
60590 #define PF_SIGNALED 0x00000400 /* killed by a signal */
60591 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
60592-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
60593+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
60594 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
60595 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
60596 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
60597@@ -1978,7 +2069,9 @@ void yield(void);
60598 extern struct exec_domain default_exec_domain;
60599
60600 union thread_union {
60601+#ifndef CONFIG_X86
60602 struct thread_info thread_info;
60603+#endif
60604 unsigned long stack[THREAD_SIZE/sizeof(long)];
60605 };
60606
60607@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60608 */
60609
60610 extern struct task_struct *find_task_by_vpid(pid_t nr);
60611+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60612 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60613 struct pid_namespace *ns);
60614
60615@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60616 extern void exit_itimers(struct signal_struct *);
60617 extern void flush_itimer_signals(void);
60618
60619-extern NORET_TYPE void do_group_exit(int);
60620+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60621
60622 extern void daemonize(const char *, ...);
60623 extern int allow_signal(int);
60624@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60625
60626 #endif
60627
60628-static inline int object_is_on_stack(void *obj)
60629+static inline int object_starts_on_stack(void *obj)
60630 {
60631- void *stack = task_stack_page(current);
60632+ const void *stack = task_stack_page(current);
60633
60634 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60635 }
60636
60637+#ifdef CONFIG_PAX_USERCOPY
60638+extern int object_is_on_stack(const void *obj, unsigned long len);
60639+#endif
60640+
60641 extern void thread_info_cache_init(void);
60642
60643 #ifdef CONFIG_DEBUG_STACK_USAGE
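/*
 * Illustrative sketch, not part of the patch: how the PaX per-mm flag
 * helpers declared in the sched.h hunk above are intended to be used.
 * Only pax_get_flags(), pax_set_flags() and the MF_PAX_* bits come from
 * the patch; pax_disable_randmmap() is a hypothetical caller.
 */
#include <linux/sched.h>

static long pax_disable_randmmap(struct task_struct *tsk)
{
	unsigned long flags;
	long ret;

	task_lock(tsk);				/* required when tsk != current */
	flags = pax_get_flags(tsk);		/* 0UL if the task has no mm */
	flags &= ~MF_PAX_RANDMMAP;		/* drop mmap() base randomization */
	ret = pax_set_flags(tsk, flags);	/* -EINVAL if the mm is gone */
	task_unlock(tsk);

	return ret;
}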
60644diff -urNp linux-2.6.32.45/include/linux/screen_info.h linux-2.6.32.45/include/linux/screen_info.h
60645--- linux-2.6.32.45/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60646+++ linux-2.6.32.45/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60647@@ -42,7 +42,8 @@ struct screen_info {
60648 __u16 pages; /* 0x32 */
60649 __u16 vesa_attributes; /* 0x34 */
60650 __u32 capabilities; /* 0x36 */
60651- __u8 _reserved[6]; /* 0x3a */
60652+ __u16 vesapm_size; /* 0x3a */
60653+ __u8 _reserved[4]; /* 0x3c */
60654 } __attribute__((packed));
60655
60656 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60657diff -urNp linux-2.6.32.45/include/linux/security.h linux-2.6.32.45/include/linux/security.h
60658--- linux-2.6.32.45/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60659+++ linux-2.6.32.45/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60660@@ -34,6 +34,7 @@
60661 #include <linux/key.h>
60662 #include <linux/xfrm.h>
60663 #include <linux/gfp.h>
60664+#include <linux/grsecurity.h>
60665 #include <net/flow.h>
60666
60667 /* Maximum number of letters for an LSM name string */
60668diff -urNp linux-2.6.32.45/include/linux/shm.h linux-2.6.32.45/include/linux/shm.h
60669--- linux-2.6.32.45/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60670+++ linux-2.6.32.45/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60671@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60672 pid_t shm_cprid;
60673 pid_t shm_lprid;
60674 struct user_struct *mlock_user;
60675+#ifdef CONFIG_GRKERNSEC
60676+ time_t shm_createtime;
60677+ pid_t shm_lapid;
60678+#endif
60679 };
60680
60681 /* shm_mode upper byte flags */
60682diff -urNp linux-2.6.32.45/include/linux/skbuff.h linux-2.6.32.45/include/linux/skbuff.h
60683--- linux-2.6.32.45/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
60684+++ linux-2.6.32.45/include/linux/skbuff.h 2011-08-21 15:27:56.000000000 -0400
60685@@ -14,6 +14,7 @@
60686 #ifndef _LINUX_SKBUFF_H
60687 #define _LINUX_SKBUFF_H
60688
60689+#include <linux/const.h>
60690 #include <linux/kernel.h>
60691 #include <linux/kmemcheck.h>
60692 #include <linux/compiler.h>
60693@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_t
60694 */
60695 static inline int skb_queue_empty(const struct sk_buff_head *list)
60696 {
60697- return list->next == (struct sk_buff *)list;
60698+ return list->next == (const struct sk_buff *)list;
60699 }
60700
60701 /**
60702@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const
60703 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60704 const struct sk_buff *skb)
60705 {
60706- return (skb->next == (struct sk_buff *) list);
60707+ return (skb->next == (const struct sk_buff *) list);
60708 }
60709
60710 /**
60711@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(con
60712 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60713 const struct sk_buff *skb)
60714 {
60715- return (skb->prev == (struct sk_buff *) list);
60716+ return (skb->prev == (const struct sk_buff *) list);
60717 }
60718
60719 /**
60720@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(con
60721 * headroom, you should not reduce this.
60722 */
60723 #ifndef NET_SKB_PAD
60724-#define NET_SKB_PAD 32
60725+#define NET_SKB_PAD (_AC(32,UL))
60726 #endif
60727
60728 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60729diff -urNp linux-2.6.32.45/include/linux/slab_def.h linux-2.6.32.45/include/linux/slab_def.h
60730--- linux-2.6.32.45/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
60731+++ linux-2.6.32.45/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
60732@@ -69,10 +69,10 @@ struct kmem_cache {
60733 unsigned long node_allocs;
60734 unsigned long node_frees;
60735 unsigned long node_overflow;
60736- atomic_t allochit;
60737- atomic_t allocmiss;
60738- atomic_t freehit;
60739- atomic_t freemiss;
60740+ atomic_unchecked_t allochit;
60741+ atomic_unchecked_t allocmiss;
60742+ atomic_unchecked_t freehit;
60743+ atomic_unchecked_t freemiss;
60744
60745 /*
60746 * If debugging is enabled, then the allocator can add additional
60747diff -urNp linux-2.6.32.45/include/linux/slab.h linux-2.6.32.45/include/linux/slab.h
60748--- linux-2.6.32.45/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
60749+++ linux-2.6.32.45/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
60750@@ -11,12 +11,20 @@
60751
60752 #include <linux/gfp.h>
60753 #include <linux/types.h>
60754+#include <linux/err.h>
60755
60756 /*
60757 * Flags to pass to kmem_cache_create().
60758 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60759 */
60760 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60761+
60762+#ifdef CONFIG_PAX_USERCOPY
60763+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60764+#else
60765+#define SLAB_USERCOPY 0x00000000UL
60766+#endif
60767+
60768 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60769 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60770 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60771@@ -82,10 +90,13 @@
60772 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60773 * Both make kfree a no-op.
60774 */
60775-#define ZERO_SIZE_PTR ((void *)16)
60776+#define ZERO_SIZE_PTR \
60777+({ \
60778+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60779+ (void *)(-MAX_ERRNO-1L); \
60780+})
60781
60782-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60783- (unsigned long)ZERO_SIZE_PTR)
60784+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60785
60786 /*
60787 * struct kmem_cache related prototypes
60788@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
60789 void kfree(const void *);
60790 void kzfree(const void *);
60791 size_t ksize(const void *);
60792+void check_object_size(const void *ptr, unsigned long n, bool to);
60793
60794 /*
60795 * Allocator specific definitions. These are mainly used to establish optimized
60796@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
60797
60798 void __init kmem_cache_init_late(void);
60799
60800+#define kmalloc(x, y) \
60801+({ \
60802+ void *___retval; \
60803+ intoverflow_t ___x = (intoverflow_t)x; \
60804+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
60805+ ___retval = NULL; \
60806+ else \
60807+ ___retval = kmalloc((size_t)___x, (y)); \
60808+ ___retval; \
60809+})
60810+
60811+#define kmalloc_node(x, y, z) \
60812+({ \
60813+ void *___retval; \
60814+ intoverflow_t ___x = (intoverflow_t)x; \
60815+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60816+ ___retval = NULL; \
60817+ else \
60818+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60819+ ___retval; \
60820+})
60821+
60822+#define kzalloc(x, y) \
60823+({ \
60824+ void *___retval; \
60825+ intoverflow_t ___x = (intoverflow_t)x; \
60826+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
60827+ ___retval = NULL; \
60828+ else \
60829+ ___retval = kzalloc((size_t)___x, (y)); \
60830+ ___retval; \
60831+})
60832+
60833 #endif /* _LINUX_SLAB_H */
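/*
 * Illustrative sketch, not part of the patch: the kmalloc()/kzalloc()
 * wrappers added above are drop-in replacements, so callers keep their
 * usual form.  The macro casts the size expression to intoverflow_t (a
 * type the patch defines elsewhere and assumes to be wide enough to hold
 * the un-truncated value), so a size that ends up above ULONG_MAX hits
 * the WARN() and yields NULL instead of a short allocation.
 * alloc_entries(), 'count' and 'struct entry' are hypothetical.
 */
#include <linux/slab.h>

struct entry {
	unsigned long key;
	void *payload;
};

static struct entry *alloc_entries(unsigned long count)
{
	/* expands to ((intoverflow_t)count) * sizeof(struct entry), so the
	 * product is evaluated in the wider type before the size check */
	return kzalloc(count * sizeof(struct entry), GFP_KERNEL);
}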
60834diff -urNp linux-2.6.32.45/include/linux/slub_def.h linux-2.6.32.45/include/linux/slub_def.h
60835--- linux-2.6.32.45/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
60836+++ linux-2.6.32.45/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
60837@@ -86,7 +86,7 @@ struct kmem_cache {
60838 struct kmem_cache_order_objects max;
60839 struct kmem_cache_order_objects min;
60840 gfp_t allocflags; /* gfp flags to use on each alloc */
60841- int refcount; /* Refcount for slab cache destroy */
60842+ atomic_t refcount; /* Refcount for slab cache destroy */
60843 void (*ctor)(void *);
60844 int inuse; /* Offset to metadata */
60845 int align; /* Alignment */
60846@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
60847 #endif
60848
60849 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60850-void *__kmalloc(size_t size, gfp_t flags);
60851+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60852
60853 #ifdef CONFIG_KMEMTRACE
60854 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
60855diff -urNp linux-2.6.32.45/include/linux/sonet.h linux-2.6.32.45/include/linux/sonet.h
60856--- linux-2.6.32.45/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
60857+++ linux-2.6.32.45/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
60858@@ -61,7 +61,7 @@ struct sonet_stats {
60859 #include <asm/atomic.h>
60860
60861 struct k_sonet_stats {
60862-#define __HANDLE_ITEM(i) atomic_t i
60863+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60864 __SONET_ITEMS
60865 #undef __HANDLE_ITEM
60866 };
60867diff -urNp linux-2.6.32.45/include/linux/sunrpc/cache.h linux-2.6.32.45/include/linux/sunrpc/cache.h
60868--- linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
60869+++ linux-2.6.32.45/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
60870@@ -125,7 +125,7 @@ struct cache_detail {
60871 */
60872 struct cache_req {
60873 struct cache_deferred_req *(*defer)(struct cache_req *req);
60874-};
60875+} __no_const;
60876 /* this must be embedded in a deferred_request that is being
60877 * delayed awaiting cache-fill
60878 */
60879diff -urNp linux-2.6.32.45/include/linux/sunrpc/clnt.h linux-2.6.32.45/include/linux/sunrpc/clnt.h
60880--- linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
60881+++ linux-2.6.32.45/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
60882@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
60883 {
60884 switch (sap->sa_family) {
60885 case AF_INET:
60886- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60887+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60888 case AF_INET6:
60889- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60890+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60891 }
60892 return 0;
60893 }
60894@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
60895 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60896 const struct sockaddr *src)
60897 {
60898- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60899+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60900 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60901
60902 dsin->sin_family = ssin->sin_family;
60903@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
60904 if (sa->sa_family != AF_INET6)
60905 return 0;
60906
60907- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60908+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60909 }
60910
60911 #endif /* __KERNEL__ */
60912diff -urNp linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h
60913--- linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
60914+++ linux-2.6.32.45/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
60915@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60916 extern unsigned int svcrdma_max_requests;
60917 extern unsigned int svcrdma_max_req_size;
60918
60919-extern atomic_t rdma_stat_recv;
60920-extern atomic_t rdma_stat_read;
60921-extern atomic_t rdma_stat_write;
60922-extern atomic_t rdma_stat_sq_starve;
60923-extern atomic_t rdma_stat_rq_starve;
60924-extern atomic_t rdma_stat_rq_poll;
60925-extern atomic_t rdma_stat_rq_prod;
60926-extern atomic_t rdma_stat_sq_poll;
60927-extern atomic_t rdma_stat_sq_prod;
60928+extern atomic_unchecked_t rdma_stat_recv;
60929+extern atomic_unchecked_t rdma_stat_read;
60930+extern atomic_unchecked_t rdma_stat_write;
60931+extern atomic_unchecked_t rdma_stat_sq_starve;
60932+extern atomic_unchecked_t rdma_stat_rq_starve;
60933+extern atomic_unchecked_t rdma_stat_rq_poll;
60934+extern atomic_unchecked_t rdma_stat_rq_prod;
60935+extern atomic_unchecked_t rdma_stat_sq_poll;
60936+extern atomic_unchecked_t rdma_stat_sq_prod;
60937
60938 #define RPCRDMA_VERSION 1
60939
60940diff -urNp linux-2.6.32.45/include/linux/suspend.h linux-2.6.32.45/include/linux/suspend.h
60941--- linux-2.6.32.45/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
60942+++ linux-2.6.32.45/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
60943@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
60944 * which require special recovery actions in that situation.
60945 */
60946 struct platform_suspend_ops {
60947- int (*valid)(suspend_state_t state);
60948- int (*begin)(suspend_state_t state);
60949- int (*prepare)(void);
60950- int (*prepare_late)(void);
60951- int (*enter)(suspend_state_t state);
60952- void (*wake)(void);
60953- void (*finish)(void);
60954- void (*end)(void);
60955- void (*recover)(void);
60956+ int (* const valid)(suspend_state_t state);
60957+ int (* const begin)(suspend_state_t state);
60958+ int (* const prepare)(void);
60959+ int (* const prepare_late)(void);
60960+ int (* const enter)(suspend_state_t state);
60961+ void (* const wake)(void);
60962+ void (* const finish)(void);
60963+ void (* const end)(void);
60964+ void (* const recover)(void);
60965 };
60966
60967 #ifdef CONFIG_SUSPEND
60968@@ -120,7 +120,7 @@ struct platform_suspend_ops {
60969 * suspend_set_ops - set platform dependent suspend operations
60970 * @ops: The new suspend operations to set.
60971 */
60972-extern void suspend_set_ops(struct platform_suspend_ops *ops);
60973+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
60974 extern int suspend_valid_only_mem(suspend_state_t state);
60975
60976 /**
60977@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
60978 #else /* !CONFIG_SUSPEND */
60979 #define suspend_valid_only_mem NULL
60980
60981-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
60982+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
60983 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
60984 #endif /* !CONFIG_SUSPEND */
60985
60986@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
60987 * platforms which require special recovery actions in that situation.
60988 */
60989 struct platform_hibernation_ops {
60990- int (*begin)(void);
60991- void (*end)(void);
60992- int (*pre_snapshot)(void);
60993- void (*finish)(void);
60994- int (*prepare)(void);
60995- int (*enter)(void);
60996- void (*leave)(void);
60997- int (*pre_restore)(void);
60998- void (*restore_cleanup)(void);
60999- void (*recover)(void);
61000+ int (* const begin)(void);
61001+ void (* const end)(void);
61002+ int (* const pre_snapshot)(void);
61003+ void (* const finish)(void);
61004+ int (* const prepare)(void);
61005+ int (* const enter)(void);
61006+ void (* const leave)(void);
61007+ int (* const pre_restore)(void);
61008+ void (* const restore_cleanup)(void);
61009+ void (* const recover)(void);
61010 };
61011
61012 #ifdef CONFIG_HIBERNATION
61013@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
61014 extern void swsusp_unset_page_free(struct page *);
61015 extern unsigned long get_safe_page(gfp_t gfp_mask);
61016
61017-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
61018+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
61019 extern int hibernate(void);
61020 extern bool system_entering_hibernation(void);
61021 #else /* CONFIG_HIBERNATION */
61022@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
61023 static inline void swsusp_set_page_free(struct page *p) {}
61024 static inline void swsusp_unset_page_free(struct page *p) {}
61025
61026-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
61027+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
61028 static inline int hibernate(void) { return -ENOSYS; }
61029 static inline bool system_entering_hibernation(void) { return false; }
61030 #endif /* CONFIG_HIBERNATION */
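/*
 * Illustrative sketch, not part of the patch: because suspend_set_ops()
 * now takes a const pointer, a platform can keep its ops table in rodata.
 * Everything named myboard_* is hypothetical; suspend_valid_only_mem()
 * and the constified struct layout come from the header above.
 */
#include <linux/init.h>
#include <linux/suspend.h>

static int myboard_pm_enter(suspend_state_t state)
{
	/* platform-specific low-power entry would go here */
	return 0;
}

static const struct platform_suspend_ops myboard_pm_ops = {
	.valid = suspend_valid_only_mem,
	.enter = myboard_pm_enter,
};

static int __init myboard_pm_init(void)
{
	suspend_set_ops(&myboard_pm_ops);
	return 0;
}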
61031diff -urNp linux-2.6.32.45/include/linux/sysctl.h linux-2.6.32.45/include/linux/sysctl.h
61032--- linux-2.6.32.45/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
61033+++ linux-2.6.32.45/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
61034@@ -164,7 +164,11 @@ enum
61035 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61036 };
61037
61038-
61039+#ifdef CONFIG_PAX_SOFTMODE
61040+enum {
61041+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61042+};
61043+#endif
61044
61045 /* CTL_VM names: */
61046 enum
61047@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
61048
61049 extern int proc_dostring(struct ctl_table *, int,
61050 void __user *, size_t *, loff_t *);
61051+extern int proc_dostring_modpriv(struct ctl_table *, int,
61052+ void __user *, size_t *, loff_t *);
61053 extern int proc_dointvec(struct ctl_table *, int,
61054 void __user *, size_t *, loff_t *);
61055 extern int proc_dointvec_minmax(struct ctl_table *, int,
61056@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
61057
61058 extern ctl_handler sysctl_data;
61059 extern ctl_handler sysctl_string;
61060+extern ctl_handler sysctl_string_modpriv;
61061 extern ctl_handler sysctl_intvec;
61062 extern ctl_handler sysctl_jiffies;
61063 extern ctl_handler sysctl_ms_jiffies;
61064diff -urNp linux-2.6.32.45/include/linux/sysfs.h linux-2.6.32.45/include/linux/sysfs.h
61065--- linux-2.6.32.45/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
61066+++ linux-2.6.32.45/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
61067@@ -75,8 +75,8 @@ struct bin_attribute {
61068 };
61069
61070 struct sysfs_ops {
61071- ssize_t (*show)(struct kobject *, struct attribute *,char *);
61072- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
61073+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
61074+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
61075 };
61076
61077 struct sysfs_dirent;
61078diff -urNp linux-2.6.32.45/include/linux/thread_info.h linux-2.6.32.45/include/linux/thread_info.h
61079--- linux-2.6.32.45/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
61080+++ linux-2.6.32.45/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
61081@@ -23,7 +23,7 @@ struct restart_block {
61082 };
61083 /* For futex_wait and futex_wait_requeue_pi */
61084 struct {
61085- u32 *uaddr;
61086+ u32 __user *uaddr;
61087 u32 val;
61088 u32 flags;
61089 u32 bitset;
61090diff -urNp linux-2.6.32.45/include/linux/tty.h linux-2.6.32.45/include/linux/tty.h
61091--- linux-2.6.32.45/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
61092+++ linux-2.6.32.45/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
61093@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
61094 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
61095 extern void tty_ldisc_enable(struct tty_struct *tty);
61096
61097-
61098 /* n_tty.c */
61099 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
61100
61101diff -urNp linux-2.6.32.45/include/linux/tty_ldisc.h linux-2.6.32.45/include/linux/tty_ldisc.h
61102--- linux-2.6.32.45/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
61103+++ linux-2.6.32.45/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
61104@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
61105
61106 struct module *owner;
61107
61108- int refcount;
61109+ atomic_t refcount;
61110 };
61111
61112 struct tty_ldisc {
61113diff -urNp linux-2.6.32.45/include/linux/types.h linux-2.6.32.45/include/linux/types.h
61114--- linux-2.6.32.45/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
61115+++ linux-2.6.32.45/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
61116@@ -191,10 +191,26 @@ typedef struct {
61117 volatile int counter;
61118 } atomic_t;
61119
61120+#ifdef CONFIG_PAX_REFCOUNT
61121+typedef struct {
61122+ volatile int counter;
61123+} atomic_unchecked_t;
61124+#else
61125+typedef atomic_t atomic_unchecked_t;
61126+#endif
61127+
61128 #ifdef CONFIG_64BIT
61129 typedef struct {
61130 volatile long counter;
61131 } atomic64_t;
61132+
61133+#ifdef CONFIG_PAX_REFCOUNT
61134+typedef struct {
61135+ volatile long counter;
61136+} atomic64_unchecked_t;
61137+#else
61138+typedef atomic64_t atomic64_unchecked_t;
61139+#endif
61140 #endif
61141
61142 struct ustat {
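/*
 * Illustrative sketch, not part of the patch: atomic_unchecked_t, added to
 * types.h above, is the opt-out type for counters that may legitimately
 * wrap (statistics, ids) and so must not trip PAX_REFCOUNT overflow
 * detection on plain atomic_t.  The atomic_*_unchecked() accessors are
 * assumed to come from the arch atomic.h hunks elsewhere in this patch;
 * rx_drops and the two helpers are hypothetical.
 */
#include <asm/atomic.h>

static atomic_unchecked_t rx_drops = ATOMIC_INIT(0);

static void note_dropped_packet(void)
{
	atomic_inc_unchecked(&rx_drops);	/* wrap-around is acceptable here */
}

static int dropped_packet_count(void)
{
	return atomic_read_unchecked(&rx_drops);
}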
61143diff -urNp linux-2.6.32.45/include/linux/uaccess.h linux-2.6.32.45/include/linux/uaccess.h
61144--- linux-2.6.32.45/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
61145+++ linux-2.6.32.45/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
61146@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
61147 long ret; \
61148 mm_segment_t old_fs = get_fs(); \
61149 \
61150- set_fs(KERNEL_DS); \
61151 pagefault_disable(); \
61152+ set_fs(KERNEL_DS); \
61153 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61154- pagefault_enable(); \
61155 set_fs(old_fs); \
61156+ pagefault_enable(); \
61157 ret; \
61158 })
61159
61160@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
61161 * Safely read from address @src to the buffer at @dst. If a kernel fault
61162 * happens, handle that and return -EFAULT.
61163 */
61164-extern long probe_kernel_read(void *dst, void *src, size_t size);
61165+extern long probe_kernel_read(void *dst, const void *src, size_t size);
61166
61167 /*
61168 * probe_kernel_write(): safely attempt to write to a location
61169@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
61170 * Safely write to address @dst from the buffer at @src. If a kernel fault
61171 * happens, handle that and return -EFAULT.
61172 */
61173-extern long probe_kernel_write(void *dst, void *src, size_t size);
61174+extern long probe_kernel_write(void *dst, const void *src, size_t size);
61175
61176 #endif /* __LINUX_UACCESS_H__ */
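/*
 * Illustrative sketch, not part of the patch: with the constified
 * prototypes above, probe_kernel_read() can be handed a const source
 * pointer without a cast.  dump_word() and 'where' are hypothetical; only
 * the probe_kernel_read() prototype comes from the hunk above.
 */
#include <linux/uaccess.h>

static long dump_word(unsigned long *dst, const unsigned long *where)
{
	/* returns -EFAULT instead of faulting if 'where' is a bad pointer */
	return probe_kernel_read(dst, where, sizeof(*dst));
}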
61177diff -urNp linux-2.6.32.45/include/linux/unaligned/access_ok.h linux-2.6.32.45/include/linux/unaligned/access_ok.h
61178--- linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
61179+++ linux-2.6.32.45/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
61180@@ -6,32 +6,32 @@
61181
61182 static inline u16 get_unaligned_le16(const void *p)
61183 {
61184- return le16_to_cpup((__le16 *)p);
61185+ return le16_to_cpup((const __le16 *)p);
61186 }
61187
61188 static inline u32 get_unaligned_le32(const void *p)
61189 {
61190- return le32_to_cpup((__le32 *)p);
61191+ return le32_to_cpup((const __le32 *)p);
61192 }
61193
61194 static inline u64 get_unaligned_le64(const void *p)
61195 {
61196- return le64_to_cpup((__le64 *)p);
61197+ return le64_to_cpup((const __le64 *)p);
61198 }
61199
61200 static inline u16 get_unaligned_be16(const void *p)
61201 {
61202- return be16_to_cpup((__be16 *)p);
61203+ return be16_to_cpup((const __be16 *)p);
61204 }
61205
61206 static inline u32 get_unaligned_be32(const void *p)
61207 {
61208- return be32_to_cpup((__be32 *)p);
61209+ return be32_to_cpup((const __be32 *)p);
61210 }
61211
61212 static inline u64 get_unaligned_be64(const void *p)
61213 {
61214- return be64_to_cpup((__be64 *)p);
61215+ return be64_to_cpup((const __be64 *)p);
61216 }
61217
61218 static inline void put_unaligned_le16(u16 val, void *p)
61219diff -urNp linux-2.6.32.45/include/linux/vmalloc.h linux-2.6.32.45/include/linux/vmalloc.h
61220--- linux-2.6.32.45/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
61221+++ linux-2.6.32.45/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
61222@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61223 #define VM_MAP 0x00000004 /* vmap()ed pages */
61224 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61225 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61226+
61227+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61228+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61229+#endif
61230+
61231 /* bits [20..32] reserved for arch specific ioremap internals */
61232
61233 /*
61234@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
61235
61236 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
61237
61238+#define vmalloc(x) \
61239+({ \
61240+ void *___retval; \
61241+ intoverflow_t ___x = (intoverflow_t)x; \
61242+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61243+ ___retval = NULL; \
61244+ else \
61245+ ___retval = vmalloc((unsigned long)___x); \
61246+ ___retval; \
61247+})
61248+
61249+#define __vmalloc(x, y, z) \
61250+({ \
61251+ void *___retval; \
61252+ intoverflow_t ___x = (intoverflow_t)x; \
61253+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61254+ ___retval = NULL; \
61255+ else \
61256+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61257+ ___retval; \
61258+})
61259+
61260+#define vmalloc_user(x) \
61261+({ \
61262+ void *___retval; \
61263+ intoverflow_t ___x = (intoverflow_t)x; \
61264+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61265+ ___retval = NULL; \
61266+ else \
61267+ ___retval = vmalloc_user((unsigned long)___x); \
61268+ ___retval; \
61269+})
61270+
61271+#define vmalloc_exec(x) \
61272+({ \
61273+ void *___retval; \
61274+ intoverflow_t ___x = (intoverflow_t)x; \
61275+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61276+ ___retval = NULL; \
61277+ else \
61278+ ___retval = vmalloc_exec((unsigned long)___x); \
61279+ ___retval; \
61280+})
61281+
61282+#define vmalloc_node(x, y) \
61283+({ \
61284+ void *___retval; \
61285+ intoverflow_t ___x = (intoverflow_t)x; \
61286+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61287+ ___retval = NULL; \
61288+ else \
61289+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61290+ ___retval; \
61291+})
61292+
61293+#define vmalloc_32(x) \
61294+({ \
61295+ void *___retval; \
61296+ intoverflow_t ___x = (intoverflow_t)x; \
61297+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61298+ ___retval = NULL; \
61299+ else \
61300+ ___retval = vmalloc_32((unsigned long)___x); \
61301+ ___retval; \
61302+})
61303+
61304+#define vmalloc_32_user(x) \
61305+({ \
61306+ void *___retval; \
61307+ intoverflow_t ___x = (intoverflow_t)x; \
61308+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61309+ ___retval = NULL; \
61310+ else \
61311+ ___retval = vmalloc_32_user((unsigned long)___x);\
61312+ ___retval; \
61313+})
61314+
61315 #endif /* _LINUX_VMALLOC_H */
61316diff -urNp linux-2.6.32.45/include/linux/vmstat.h linux-2.6.32.45/include/linux/vmstat.h
61317--- linux-2.6.32.45/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61318+++ linux-2.6.32.45/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61319@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61320 /*
61321 * Zone based page accounting with per cpu differentials.
61322 */
61323-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61324+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61325
61326 static inline void zone_page_state_add(long x, struct zone *zone,
61327 enum zone_stat_item item)
61328 {
61329- atomic_long_add(x, &zone->vm_stat[item]);
61330- atomic_long_add(x, &vm_stat[item]);
61331+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61332+ atomic_long_add_unchecked(x, &vm_stat[item]);
61333 }
61334
61335 static inline unsigned long global_page_state(enum zone_stat_item item)
61336 {
61337- long x = atomic_long_read(&vm_stat[item]);
61338+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61339 #ifdef CONFIG_SMP
61340 if (x < 0)
61341 x = 0;
61342@@ -158,7 +158,7 @@ static inline unsigned long global_page_
61343 static inline unsigned long zone_page_state(struct zone *zone,
61344 enum zone_stat_item item)
61345 {
61346- long x = atomic_long_read(&zone->vm_stat[item]);
61347+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61348 #ifdef CONFIG_SMP
61349 if (x < 0)
61350 x = 0;
61351@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61352 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61353 enum zone_stat_item item)
61354 {
61355- long x = atomic_long_read(&zone->vm_stat[item]);
61356+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61357
61358 #ifdef CONFIG_SMP
61359 int cpu;
61360@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61361
61362 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61363 {
61364- atomic_long_inc(&zone->vm_stat[item]);
61365- atomic_long_inc(&vm_stat[item]);
61366+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61367+ atomic_long_inc_unchecked(&vm_stat[item]);
61368 }
61369
61370 static inline void __inc_zone_page_state(struct page *page,
61371@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61372
61373 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61374 {
61375- atomic_long_dec(&zone->vm_stat[item]);
61376- atomic_long_dec(&vm_stat[item]);
61377+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61378+ atomic_long_dec_unchecked(&vm_stat[item]);
61379 }
61380
61381 static inline void __dec_zone_page_state(struct page *page,
61382diff -urNp linux-2.6.32.45/include/media/v4l2-dev.h linux-2.6.32.45/include/media/v4l2-dev.h
61383--- linux-2.6.32.45/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61384+++ linux-2.6.32.45/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61385@@ -34,7 +34,7 @@ struct v4l2_device;
61386 #define V4L2_FL_UNREGISTERED (0)
61387
61388 struct v4l2_file_operations {
61389- struct module *owner;
61390+ struct module * const owner;
61391 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61392 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61393 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61394diff -urNp linux-2.6.32.45/include/media/v4l2-device.h linux-2.6.32.45/include/media/v4l2-device.h
61395--- linux-2.6.32.45/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61396+++ linux-2.6.32.45/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61397@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61398 this function returns 0. If the name ends with a digit (e.g. cx18),
61399 then the name will be set to cx18-0 since cx180 looks really odd. */
61400 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61401- atomic_t *instance);
61402+ atomic_unchecked_t *instance);
61403
61404 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61405 Since the parent disappears this ensures that v4l2_dev doesn't have an
61406diff -urNp linux-2.6.32.45/include/net/flow.h linux-2.6.32.45/include/net/flow.h
61407--- linux-2.6.32.45/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61408+++ linux-2.6.32.45/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61409@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61410 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61411 u8 dir, flow_resolve_t resolver);
61412 extern void flow_cache_flush(void);
61413-extern atomic_t flow_cache_genid;
61414+extern atomic_unchecked_t flow_cache_genid;
61415
61416 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61417 {
61418diff -urNp linux-2.6.32.45/include/net/inetpeer.h linux-2.6.32.45/include/net/inetpeer.h
61419--- linux-2.6.32.45/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61420+++ linux-2.6.32.45/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61421@@ -24,7 +24,7 @@ struct inet_peer
61422 __u32 dtime; /* the time of last use of not
61423 * referenced entries */
61424 atomic_t refcnt;
61425- atomic_t rid; /* Frag reception counter */
61426+ atomic_unchecked_t rid; /* Frag reception counter */
61427 __u32 tcp_ts;
61428 unsigned long tcp_ts_stamp;
61429 };
61430diff -urNp linux-2.6.32.45/include/net/ip_vs.h linux-2.6.32.45/include/net/ip_vs.h
61431--- linux-2.6.32.45/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61432+++ linux-2.6.32.45/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61433@@ -365,7 +365,7 @@ struct ip_vs_conn {
61434 struct ip_vs_conn *control; /* Master control connection */
61435 atomic_t n_control; /* Number of controlled ones */
61436 struct ip_vs_dest *dest; /* real server */
61437- atomic_t in_pkts; /* incoming packet counter */
61438+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61439
61440 /* packet transmitter for different forwarding methods. If it
61441 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61442@@ -466,7 +466,7 @@ struct ip_vs_dest {
61443 union nf_inet_addr addr; /* IP address of the server */
61444 __be16 port; /* port number of the server */
61445 volatile unsigned flags; /* dest status flags */
61446- atomic_t conn_flags; /* flags to copy to conn */
61447+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61448 atomic_t weight; /* server weight */
61449
61450 atomic_t refcnt; /* reference counter */
61451diff -urNp linux-2.6.32.45/include/net/irda/ircomm_core.h linux-2.6.32.45/include/net/irda/ircomm_core.h
61452--- linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61453+++ linux-2.6.32.45/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61454@@ -51,7 +51,7 @@ typedef struct {
61455 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61456 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61457 struct ircomm_info *);
61458-} call_t;
61459+} __no_const call_t;
61460
61461 struct ircomm_cb {
61462 irda_queue_t queue;
61463diff -urNp linux-2.6.32.45/include/net/irda/ircomm_tty.h linux-2.6.32.45/include/net/irda/ircomm_tty.h
61464--- linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61465+++ linux-2.6.32.45/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61466@@ -35,6 +35,7 @@
61467 #include <linux/termios.h>
61468 #include <linux/timer.h>
61469 #include <linux/tty.h> /* struct tty_struct */
61470+#include <asm/local.h>
61471
61472 #include <net/irda/irias_object.h>
61473 #include <net/irda/ircomm_core.h>
61474@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61475 unsigned short close_delay;
61476 unsigned short closing_wait; /* time to wait before closing */
61477
61478- int open_count;
61479- int blocked_open; /* # of blocked opens */
61480+ local_t open_count;
61481+ local_t blocked_open; /* # of blocked opens */
61482
61483 /* Protect concurent access to :
61484 * o self->open_count
61485diff -urNp linux-2.6.32.45/include/net/iucv/af_iucv.h linux-2.6.32.45/include/net/iucv/af_iucv.h
61486--- linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61487+++ linux-2.6.32.45/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61488@@ -87,7 +87,7 @@ struct iucv_sock {
61489 struct iucv_sock_list {
61490 struct hlist_head head;
61491 rwlock_t lock;
61492- atomic_t autobind_name;
61493+ atomic_unchecked_t autobind_name;
61494 };
61495
61496 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61497diff -urNp linux-2.6.32.45/include/net/lapb.h linux-2.6.32.45/include/net/lapb.h
61498--- linux-2.6.32.45/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61499+++ linux-2.6.32.45/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61500@@ -95,7 +95,7 @@ struct lapb_cb {
61501 struct sk_buff_head write_queue;
61502 struct sk_buff_head ack_queue;
61503 unsigned char window;
61504- struct lapb_register_struct callbacks;
61505+ struct lapb_register_struct *callbacks;
61506
61507 /* FRMR control information */
61508 struct lapb_frame frmr_data;
61509diff -urNp linux-2.6.32.45/include/net/neighbour.h linux-2.6.32.45/include/net/neighbour.h
61510--- linux-2.6.32.45/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61511+++ linux-2.6.32.45/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61512@@ -125,12 +125,12 @@ struct neighbour
61513 struct neigh_ops
61514 {
61515 int family;
61516- void (*solicit)(struct neighbour *, struct sk_buff*);
61517- void (*error_report)(struct neighbour *, struct sk_buff*);
61518- int (*output)(struct sk_buff*);
61519- int (*connected_output)(struct sk_buff*);
61520- int (*hh_output)(struct sk_buff*);
61521- int (*queue_xmit)(struct sk_buff*);
61522+ void (* const solicit)(struct neighbour *, struct sk_buff*);
61523+ void (* const error_report)(struct neighbour *, struct sk_buff*);
61524+ int (* const output)(struct sk_buff*);
61525+ int (* const connected_output)(struct sk_buff*);
61526+ int (* const hh_output)(struct sk_buff*);
61527+ int (* const queue_xmit)(struct sk_buff*);
61528 };
61529
61530 struct pneigh_entry
61531diff -urNp linux-2.6.32.45/include/net/netlink.h linux-2.6.32.45/include/net/netlink.h
61532--- linux-2.6.32.45/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61533+++ linux-2.6.32.45/include/net/netlink.h 2011-08-21 18:08:11.000000000 -0400
61534@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct
61535 {
61536 return (remaining >= (int) sizeof(struct nlmsghdr) &&
61537 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
61538- nlh->nlmsg_len <= remaining);
61539+ nlh->nlmsg_len <= (unsigned int)remaining);
61540 }
61541
61542 /**
61543@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61544 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61545 {
61546 if (mark)
61547- skb_trim(skb, (unsigned char *) mark - skb->data);
61548+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61549 }
61550
61551 /**
61552diff -urNp linux-2.6.32.45/include/net/netns/ipv4.h linux-2.6.32.45/include/net/netns/ipv4.h
61553--- linux-2.6.32.45/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61554+++ linux-2.6.32.45/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61555@@ -54,7 +54,7 @@ struct netns_ipv4 {
61556 int current_rt_cache_rebuild_count;
61557
61558 struct timer_list rt_secret_timer;
61559- atomic_t rt_genid;
61560+ atomic_unchecked_t rt_genid;
61561
61562 #ifdef CONFIG_IP_MROUTE
61563 struct sock *mroute_sk;
61564diff -urNp linux-2.6.32.45/include/net/sctp/sctp.h linux-2.6.32.45/include/net/sctp/sctp.h
61565--- linux-2.6.32.45/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61566+++ linux-2.6.32.45/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61567@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61568
61569 #else /* SCTP_DEBUG */
61570
61571-#define SCTP_DEBUG_PRINTK(whatever...)
61572-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61573+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61574+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61575 #define SCTP_ENABLE_DEBUG
61576 #define SCTP_DISABLE_DEBUG
61577 #define SCTP_ASSERT(expr, str, func)
61578diff -urNp linux-2.6.32.45/include/net/secure_seq.h linux-2.6.32.45/include/net/secure_seq.h
61579--- linux-2.6.32.45/include/net/secure_seq.h 2011-08-16 20:37:25.000000000 -0400
61580+++ linux-2.6.32.45/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61581@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
61582 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61583 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61584 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61585- __be16 dport);
61586+ __be16 dport);
61587 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61588 __be16 sport, __be16 dport);
61589 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61590- __be16 sport, __be16 dport);
61591+ __be16 sport, __be16 dport);
61592 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61593- __be16 sport, __be16 dport);
61594+ __be16 sport, __be16 dport);
61595 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61596- __be16 sport, __be16 dport);
61597+ __be16 sport, __be16 dport);
61598
61599 #endif /* _NET_SECURE_SEQ */
61600diff -urNp linux-2.6.32.45/include/net/sock.h linux-2.6.32.45/include/net/sock.h
61601--- linux-2.6.32.45/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61602+++ linux-2.6.32.45/include/net/sock.h 2011-08-21 17:24:37.000000000 -0400
61603@@ -272,7 +272,7 @@ struct sock {
61604 rwlock_t sk_callback_lock;
61605 int sk_err,
61606 sk_err_soft;
61607- atomic_t sk_drops;
61608+ atomic_unchecked_t sk_drops;
61609 unsigned short sk_ack_backlog;
61610 unsigned short sk_max_ack_backlog;
61611 __u32 sk_priority;
61612@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_relea
61613 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
61614 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
61615 #else
61616-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
61617+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
61618 int inc)
61619 {
61620 }
61621diff -urNp linux-2.6.32.45/include/net/tcp.h linux-2.6.32.45/include/net/tcp.h
61622--- linux-2.6.32.45/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61623+++ linux-2.6.32.45/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61624@@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61625 struct tcp_seq_afinfo {
61626 char *name;
61627 sa_family_t family;
61628+ /* cannot be const */
61629 struct file_operations seq_fops;
61630 struct seq_operations seq_ops;
61631 };
61632diff -urNp linux-2.6.32.45/include/net/udp.h linux-2.6.32.45/include/net/udp.h
61633--- linux-2.6.32.45/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61634+++ linux-2.6.32.45/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61635@@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61636 char *name;
61637 sa_family_t family;
61638 struct udp_table *udp_table;
61639+ /* cannot be const */
61640 struct file_operations seq_fops;
61641 struct seq_operations seq_ops;
61642 };
61643diff -urNp linux-2.6.32.45/include/rdma/iw_cm.h linux-2.6.32.45/include/rdma/iw_cm.h
61644--- linux-2.6.32.45/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61645+++ linux-2.6.32.45/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61646@@ -129,7 +129,7 @@ struct iw_cm_verbs {
61647 int backlog);
61648
61649 int (*destroy_listen)(struct iw_cm_id *cm_id);
61650-};
61651+} __no_const;
61652
61653 /**
61654 * iw_create_cm_id - Create an IW CM identifier.
61655diff -urNp linux-2.6.32.45/include/scsi/scsi_device.h linux-2.6.32.45/include/scsi/scsi_device.h
61656--- linux-2.6.32.45/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61657+++ linux-2.6.32.45/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61658@@ -156,9 +156,9 @@ struct scsi_device {
61659 unsigned int max_device_blocked; /* what device_blocked counts down from */
61660 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61661
61662- atomic_t iorequest_cnt;
61663- atomic_t iodone_cnt;
61664- atomic_t ioerr_cnt;
61665+ atomic_unchecked_t iorequest_cnt;
61666+ atomic_unchecked_t iodone_cnt;
61667+ atomic_unchecked_t ioerr_cnt;
61668
61669 struct device sdev_gendev,
61670 sdev_dev;
61671diff -urNp linux-2.6.32.45/include/scsi/scsi_transport_fc.h linux-2.6.32.45/include/scsi/scsi_transport_fc.h
61672--- linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61673+++ linux-2.6.32.45/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61674@@ -663,9 +663,9 @@ struct fc_function_template {
61675 int (*bsg_timeout)(struct fc_bsg_job *);
61676
61677 /* allocation lengths for host-specific data */
61678- u32 dd_fcrport_size;
61679- u32 dd_fcvport_size;
61680- u32 dd_bsg_size;
61681+ const u32 dd_fcrport_size;
61682+ const u32 dd_fcvport_size;
61683+ const u32 dd_bsg_size;
61684
61685 /*
61686 * The driver sets these to tell the transport class it
61687@@ -675,39 +675,39 @@ struct fc_function_template {
61688 */
61689
61690 /* remote port fixed attributes */
61691- unsigned long show_rport_maxframe_size:1;
61692- unsigned long show_rport_supported_classes:1;
61693- unsigned long show_rport_dev_loss_tmo:1;
61694+ const unsigned long show_rport_maxframe_size:1;
61695+ const unsigned long show_rport_supported_classes:1;
61696+ const unsigned long show_rport_dev_loss_tmo:1;
61697
61698 /*
61699 * target dynamic attributes
61700 * These should all be "1" if the driver uses the remote port
61701 * add/delete functions (so attributes reflect rport values).
61702 */
61703- unsigned long show_starget_node_name:1;
61704- unsigned long show_starget_port_name:1;
61705- unsigned long show_starget_port_id:1;
61706+ const unsigned long show_starget_node_name:1;
61707+ const unsigned long show_starget_port_name:1;
61708+ const unsigned long show_starget_port_id:1;
61709
61710 /* host fixed attributes */
61711- unsigned long show_host_node_name:1;
61712- unsigned long show_host_port_name:1;
61713- unsigned long show_host_permanent_port_name:1;
61714- unsigned long show_host_supported_classes:1;
61715- unsigned long show_host_supported_fc4s:1;
61716- unsigned long show_host_supported_speeds:1;
61717- unsigned long show_host_maxframe_size:1;
61718- unsigned long show_host_serial_number:1;
61719+ const unsigned long show_host_node_name:1;
61720+ const unsigned long show_host_port_name:1;
61721+ const unsigned long show_host_permanent_port_name:1;
61722+ const unsigned long show_host_supported_classes:1;
61723+ const unsigned long show_host_supported_fc4s:1;
61724+ const unsigned long show_host_supported_speeds:1;
61725+ const unsigned long show_host_maxframe_size:1;
61726+ const unsigned long show_host_serial_number:1;
61727 /* host dynamic attributes */
61728- unsigned long show_host_port_id:1;
61729- unsigned long show_host_port_type:1;
61730- unsigned long show_host_port_state:1;
61731- unsigned long show_host_active_fc4s:1;
61732- unsigned long show_host_speed:1;
61733- unsigned long show_host_fabric_name:1;
61734- unsigned long show_host_symbolic_name:1;
61735- unsigned long show_host_system_hostname:1;
61736+ const unsigned long show_host_port_id:1;
61737+ const unsigned long show_host_port_type:1;
61738+ const unsigned long show_host_port_state:1;
61739+ const unsigned long show_host_active_fc4s:1;
61740+ const unsigned long show_host_speed:1;
61741+ const unsigned long show_host_fabric_name:1;
61742+ const unsigned long show_host_symbolic_name:1;
61743+ const unsigned long show_host_system_hostname:1;
61744
61745- unsigned long disable_target_scan:1;
61746+ const unsigned long disable_target_scan:1;
61747 };
61748
61749
61750diff -urNp linux-2.6.32.45/include/sound/ac97_codec.h linux-2.6.32.45/include/sound/ac97_codec.h
61751--- linux-2.6.32.45/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
61752+++ linux-2.6.32.45/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
61753@@ -419,15 +419,15 @@
61754 struct snd_ac97;
61755
61756 struct snd_ac97_build_ops {
61757- int (*build_3d) (struct snd_ac97 *ac97);
61758- int (*build_specific) (struct snd_ac97 *ac97);
61759- int (*build_spdif) (struct snd_ac97 *ac97);
61760- int (*build_post_spdif) (struct snd_ac97 *ac97);
61761+ int (* const build_3d) (struct snd_ac97 *ac97);
61762+ int (* const build_specific) (struct snd_ac97 *ac97);
61763+ int (* const build_spdif) (struct snd_ac97 *ac97);
61764+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
61765 #ifdef CONFIG_PM
61766- void (*suspend) (struct snd_ac97 *ac97);
61767- void (*resume) (struct snd_ac97 *ac97);
61768+ void (* const suspend) (struct snd_ac97 *ac97);
61769+ void (* const resume) (struct snd_ac97 *ac97);
61770 #endif
61771- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61772+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
61773 };
61774
61775 struct snd_ac97_bus_ops {
61776@@ -477,7 +477,7 @@ struct snd_ac97_template {
61777
61778 struct snd_ac97 {
61779 /* -- lowlevel (hardware) driver specific -- */
61780- struct snd_ac97_build_ops * build_ops;
61781+ const struct snd_ac97_build_ops * build_ops;
61782 void *private_data;
61783 void (*private_free) (struct snd_ac97 *ac97);
61784 /* --- */
61785diff -urNp linux-2.6.32.45/include/sound/ak4xxx-adda.h linux-2.6.32.45/include/sound/ak4xxx-adda.h
61786--- linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
61787+++ linux-2.6.32.45/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
61788@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61789 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61790 unsigned char val);
61791 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61792-};
61793+} __no_const;
61794
61795 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61796
61797diff -urNp linux-2.6.32.45/include/sound/hwdep.h linux-2.6.32.45/include/sound/hwdep.h
61798--- linux-2.6.32.45/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
61799+++ linux-2.6.32.45/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
61800@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61801 struct snd_hwdep_dsp_status *status);
61802 int (*dsp_load)(struct snd_hwdep *hw,
61803 struct snd_hwdep_dsp_image *image);
61804-};
61805+} __no_const;
61806
61807 struct snd_hwdep {
61808 struct snd_card *card;
61809diff -urNp linux-2.6.32.45/include/sound/info.h linux-2.6.32.45/include/sound/info.h
61810--- linux-2.6.32.45/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
61811+++ linux-2.6.32.45/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
61812@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61813 struct snd_info_buffer *buffer);
61814 void (*write)(struct snd_info_entry *entry,
61815 struct snd_info_buffer *buffer);
61816-};
61817+} __no_const;
61818
61819 struct snd_info_entry_ops {
61820 int (*open)(struct snd_info_entry *entry,
61821diff -urNp linux-2.6.32.45/include/sound/sb16_csp.h linux-2.6.32.45/include/sound/sb16_csp.h
61822--- linux-2.6.32.45/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
61823+++ linux-2.6.32.45/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
61824@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
61825 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61826 int (*csp_stop) (struct snd_sb_csp * p);
61827 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61828-};
61829+} __no_const;
61830
61831 /*
61832 * CSP private data
61833diff -urNp linux-2.6.32.45/include/sound/ymfpci.h linux-2.6.32.45/include/sound/ymfpci.h
61834--- linux-2.6.32.45/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
61835+++ linux-2.6.32.45/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
61836@@ -358,7 +358,7 @@ struct snd_ymfpci {
61837 spinlock_t reg_lock;
61838 spinlock_t voice_lock;
61839 wait_queue_head_t interrupt_sleep;
61840- atomic_t interrupt_sleep_count;
61841+ atomic_unchecked_t interrupt_sleep_count;
61842 struct snd_info_entry *proc_entry;
61843 const struct firmware *dsp_microcode;
61844 const struct firmware *controller_microcode;
61845diff -urNp linux-2.6.32.45/include/trace/events/irq.h linux-2.6.32.45/include/trace/events/irq.h
61846--- linux-2.6.32.45/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
61847+++ linux-2.6.32.45/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
61848@@ -34,7 +34,7 @@
61849 */
61850 TRACE_EVENT(irq_handler_entry,
61851
61852- TP_PROTO(int irq, struct irqaction *action),
61853+ TP_PROTO(int irq, const struct irqaction *action),
61854
61855 TP_ARGS(irq, action),
61856
61857@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
61858 */
61859 TRACE_EVENT(irq_handler_exit,
61860
61861- TP_PROTO(int irq, struct irqaction *action, int ret),
61862+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61863
61864 TP_ARGS(irq, action, ret),
61865
61866@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
61867 */
61868 TRACE_EVENT(softirq_entry,
61869
61870- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61871+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61872
61873 TP_ARGS(h, vec),
61874
61875@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
61876 */
61877 TRACE_EVENT(softirq_exit,
61878
61879- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
61880+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
61881
61882 TP_ARGS(h, vec),
61883
61884diff -urNp linux-2.6.32.45/include/video/uvesafb.h linux-2.6.32.45/include/video/uvesafb.h
61885--- linux-2.6.32.45/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
61886+++ linux-2.6.32.45/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
61887@@ -177,6 +177,7 @@ struct uvesafb_par {
61888 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61889 u8 pmi_setpal; /* PMI for palette changes */
61890 u16 *pmi_base; /* protected mode interface location */
61891+ u8 *pmi_code; /* protected mode code location */
61892 void *pmi_start;
61893 void *pmi_pal;
61894 u8 *vbe_state_orig; /*
61895diff -urNp linux-2.6.32.45/init/do_mounts.c linux-2.6.32.45/init/do_mounts.c
61896--- linux-2.6.32.45/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
61897+++ linux-2.6.32.45/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
61898@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
61899
61900 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61901 {
61902- int err = sys_mount(name, "/root", fs, flags, data);
61903+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
61904 if (err)
61905 return err;
61906
61907- sys_chdir("/root");
61908+ sys_chdir((__force const char __user *)"/root");
61909 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61910 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
61911 current->fs->pwd.mnt->mnt_sb->s_type->name,
61912@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
61913 va_start(args, fmt);
61914 vsprintf(buf, fmt, args);
61915 va_end(args);
61916- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
61917+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
61918 if (fd >= 0) {
61919 sys_ioctl(fd, FDEJECT, 0);
61920 sys_close(fd);
61921 }
61922 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
61923- fd = sys_open("/dev/console", O_RDWR, 0);
61924+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
61925 if (fd >= 0) {
61926 sys_ioctl(fd, TCGETS, (long)&termios);
61927 termios.c_lflag &= ~ICANON;
61928 sys_ioctl(fd, TCSETSF, (long)&termios);
61929- sys_read(fd, &c, 1);
61930+ sys_read(fd, (char __user *)&c, 1);
61931 termios.c_lflag |= ICANON;
61932 sys_ioctl(fd, TCSETSF, (long)&termios);
61933 sys_close(fd);
61934@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
61935 mount_root();
61936 out:
61937 devtmpfs_mount("dev");
61938- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61939- sys_chroot(".");
61940+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61941+ sys_chroot((__force char __user *)".");
61942 }
61943diff -urNp linux-2.6.32.45/init/do_mounts.h linux-2.6.32.45/init/do_mounts.h
61944--- linux-2.6.32.45/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
61945+++ linux-2.6.32.45/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
61946@@ -15,15 +15,15 @@ extern int root_mountflags;
61947
61948 static inline int create_dev(char *name, dev_t dev)
61949 {
61950- sys_unlink(name);
61951- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
61952+ sys_unlink((__force char __user *)name);
61953+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
61954 }
61955
61956 #if BITS_PER_LONG == 32
61957 static inline u32 bstat(char *name)
61958 {
61959 struct stat64 stat;
61960- if (sys_stat64(name, &stat) != 0)
61961+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
61962 return 0;
61963 if (!S_ISBLK(stat.st_mode))
61964 return 0;
61965diff -urNp linux-2.6.32.45/init/do_mounts_initrd.c linux-2.6.32.45/init/do_mounts_initrd.c
61966--- linux-2.6.32.45/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
61967+++ linux-2.6.32.45/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
61968@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
61969 sys_close(old_fd);sys_close(root_fd);
61970 sys_close(0);sys_close(1);sys_close(2);
61971 sys_setsid();
61972- (void) sys_open("/dev/console",O_RDWR,0);
61973+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
61974 (void) sys_dup(0);
61975 (void) sys_dup(0);
61976 return kernel_execve(shell, argv, envp_init);
61977@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
61978 create_dev("/dev/root.old", Root_RAM0);
61979 /* mount initrd on rootfs' /root */
61980 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
61981- sys_mkdir("/old", 0700);
61982- root_fd = sys_open("/", 0, 0);
61983- old_fd = sys_open("/old", 0, 0);
61984+ sys_mkdir((__force const char __user *)"/old", 0700);
61985+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
61986+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
61987 /* move initrd over / and chdir/chroot in initrd root */
61988- sys_chdir("/root");
61989- sys_mount(".", "/", NULL, MS_MOVE, NULL);
61990- sys_chroot(".");
61991+ sys_chdir((__force const char __user *)"/root");
61992+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
61993+ sys_chroot((__force const char __user *)".");
61994
61995 /*
61996 * In case that a resume from disk is carried out by linuxrc or one of
61997@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
61998
61999 /* move initrd to rootfs' /old */
62000 sys_fchdir(old_fd);
62001- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62002+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
62003 /* switch root and cwd back to / of rootfs */
62004 sys_fchdir(root_fd);
62005- sys_chroot(".");
62006+ sys_chroot((__force const char __user *)".");
62007 sys_close(old_fd);
62008 sys_close(root_fd);
62009
62010 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62011- sys_chdir("/old");
62012+ sys_chdir((__force const char __user *)"/old");
62013 return;
62014 }
62015
62016@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
62017 mount_root();
62018
62019 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62020- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62021+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
62022 if (!error)
62023 printk("okay\n");
62024 else {
62025- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62026+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
62027 if (error == -ENOENT)
62028 printk("/initrd does not exist. Ignored.\n");
62029 else
62030 printk("failed\n");
62031 printk(KERN_NOTICE "Unmounting old root\n");
62032- sys_umount("/old", MNT_DETACH);
62033+ sys_umount((__force char __user *)"/old", MNT_DETACH);
62034 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62035 if (fd < 0) {
62036 error = fd;
62037@@ -119,11 +119,11 @@ int __init initrd_load(void)
62038 * mounted in the normal path.
62039 */
62040 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62041- sys_unlink("/initrd.image");
62042+ sys_unlink((__force const char __user *)"/initrd.image");
62043 handle_initrd();
62044 return 1;
62045 }
62046 }
62047- sys_unlink("/initrd.image");
62048+ sys_unlink((__force const char __user *)"/initrd.image");
62049 return 0;
62050 }
62051diff -urNp linux-2.6.32.45/init/do_mounts_md.c linux-2.6.32.45/init/do_mounts_md.c
62052--- linux-2.6.32.45/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
62053+++ linux-2.6.32.45/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
62054@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62055 partitioned ? "_d" : "", minor,
62056 md_setup_args[ent].device_names);
62057
62058- fd = sys_open(name, 0, 0);
62059+ fd = sys_open((__force char __user *)name, 0, 0);
62060 if (fd < 0) {
62061 printk(KERN_ERR "md: open failed - cannot start "
62062 "array %s\n", name);
62063@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62064 * array without it
62065 */
62066 sys_close(fd);
62067- fd = sys_open(name, 0, 0);
62068+ fd = sys_open((__force char __user *)name, 0, 0);
62069 sys_ioctl(fd, BLKRRPART, 0);
62070 }
62071 sys_close(fd);
62072@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62073
62074 wait_for_device_probe();
62075
62076- fd = sys_open("/dev/md0", 0, 0);
62077+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
62078 if (fd >= 0) {
62079 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62080 sys_close(fd);
62081diff -urNp linux-2.6.32.45/init/initramfs.c linux-2.6.32.45/init/initramfs.c
62082--- linux-2.6.32.45/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
62083+++ linux-2.6.32.45/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
62084@@ -74,7 +74,7 @@ static void __init free_hash(void)
62085 }
62086 }
62087
62088-static long __init do_utime(char __user *filename, time_t mtime)
62089+static long __init do_utime(__force char __user *filename, time_t mtime)
62090 {
62091 struct timespec t[2];
62092
62093@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62094 struct dir_entry *de, *tmp;
62095 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62096 list_del(&de->list);
62097- do_utime(de->name, de->mtime);
62098+ do_utime((__force char __user *)de->name, de->mtime);
62099 kfree(de->name);
62100 kfree(de);
62101 }
62102@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62103 if (nlink >= 2) {
62104 char *old = find_link(major, minor, ino, mode, collected);
62105 if (old)
62106- return (sys_link(old, collected) < 0) ? -1 : 1;
62107+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
62108 }
62109 return 0;
62110 }
62111@@ -280,11 +280,11 @@ static void __init clean_path(char *path
62112 {
62113 struct stat st;
62114
62115- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62116+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
62117 if (S_ISDIR(st.st_mode))
62118- sys_rmdir(path);
62119+ sys_rmdir((__force char __user *)path);
62120 else
62121- sys_unlink(path);
62122+ sys_unlink((__force char __user *)path);
62123 }
62124 }
62125
62126@@ -305,7 +305,7 @@ static int __init do_name(void)
62127 int openflags = O_WRONLY|O_CREAT;
62128 if (ml != 1)
62129 openflags |= O_TRUNC;
62130- wfd = sys_open(collected, openflags, mode);
62131+ wfd = sys_open((__force char __user *)collected, openflags, mode);
62132
62133 if (wfd >= 0) {
62134 sys_fchown(wfd, uid, gid);
62135@@ -317,17 +317,17 @@ static int __init do_name(void)
62136 }
62137 }
62138 } else if (S_ISDIR(mode)) {
62139- sys_mkdir(collected, mode);
62140- sys_chown(collected, uid, gid);
62141- sys_chmod(collected, mode);
62142+ sys_mkdir((__force char __user *)collected, mode);
62143+ sys_chown((__force char __user *)collected, uid, gid);
62144+ sys_chmod((__force char __user *)collected, mode);
62145 dir_add(collected, mtime);
62146 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62147 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62148 if (maybe_link() == 0) {
62149- sys_mknod(collected, mode, rdev);
62150- sys_chown(collected, uid, gid);
62151- sys_chmod(collected, mode);
62152- do_utime(collected, mtime);
62153+ sys_mknod((__force char __user *)collected, mode, rdev);
62154+ sys_chown((__force char __user *)collected, uid, gid);
62155+ sys_chmod((__force char __user *)collected, mode);
62156+ do_utime((__force char __user *)collected, mtime);
62157 }
62158 }
62159 return 0;
62160@@ -336,15 +336,15 @@ static int __init do_name(void)
62161 static int __init do_copy(void)
62162 {
62163 if (count >= body_len) {
62164- sys_write(wfd, victim, body_len);
62165+ sys_write(wfd, (__force char __user *)victim, body_len);
62166 sys_close(wfd);
62167- do_utime(vcollected, mtime);
62168+ do_utime((__force char __user *)vcollected, mtime);
62169 kfree(vcollected);
62170 eat(body_len);
62171 state = SkipIt;
62172 return 0;
62173 } else {
62174- sys_write(wfd, victim, count);
62175+ sys_write(wfd, (__force char __user *)victim, count);
62176 body_len -= count;
62177 eat(count);
62178 return 1;
62179@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62180 {
62181 collected[N_ALIGN(name_len) + body_len] = '\0';
62182 clean_path(collected, 0);
62183- sys_symlink(collected + N_ALIGN(name_len), collected);
62184- sys_lchown(collected, uid, gid);
62185- do_utime(collected, mtime);
62186+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
62187+ sys_lchown((__force char __user *)collected, uid, gid);
62188+ do_utime((__force char __user *)collected, mtime);
62189 state = SkipIt;
62190 next_state = Reset;
62191 return 0;
62192diff -urNp linux-2.6.32.45/init/Kconfig linux-2.6.32.45/init/Kconfig
62193--- linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
62194+++ linux-2.6.32.45/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
62195@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
62196
62197 config COMPAT_BRK
62198 bool "Disable heap randomization"
62199- default y
62200+ default n
62201 help
62202 Randomizing heap placement makes heap exploits harder, but it
62203 also breaks ancient binaries (including anything libc5 based).
62204diff -urNp linux-2.6.32.45/init/main.c linux-2.6.32.45/init/main.c
62205--- linux-2.6.32.45/init/main.c 2011-05-10 22:12:01.000000000 -0400
62206+++ linux-2.6.32.45/init/main.c 2011-08-05 20:33:55.000000000 -0400
62207@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
62208 #ifdef CONFIG_TC
62209 extern void tc_init(void);
62210 #endif
62211+extern void grsecurity_init(void);
62212
62213 enum system_states system_state __read_mostly;
62214 EXPORT_SYMBOL(system_state);
62215@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
62216
62217 __setup("reset_devices", set_reset_devices);
62218
62219+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62220+extern char pax_enter_kernel_user[];
62221+extern char pax_exit_kernel_user[];
62222+extern pgdval_t clone_pgd_mask;
62223+#endif
62224+
62225+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62226+static int __init setup_pax_nouderef(char *str)
62227+{
62228+#ifdef CONFIG_X86_32
62229+ unsigned int cpu;
62230+ struct desc_struct *gdt;
62231+
62232+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
62233+ gdt = get_cpu_gdt_table(cpu);
62234+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62235+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62236+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62237+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62238+ }
62239+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62240+#else
62241+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62242+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62243+ clone_pgd_mask = ~(pgdval_t)0UL;
62244+#endif
62245+
62246+ return 0;
62247+}
62248+early_param("pax_nouderef", setup_pax_nouderef);
62249+#endif
62250+
62251+#ifdef CONFIG_PAX_SOFTMODE
62252+int pax_softmode;
62253+
62254+static int __init setup_pax_softmode(char *str)
62255+{
62256+ get_option(&str, &pax_softmode);
62257+ return 1;
62258+}
62259+__setup("pax_softmode=", setup_pax_softmode);
62260+#endif
62261+
62262 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62263 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62264 static const char *panic_later, *panic_param;
62265@@ -705,52 +749,53 @@ int initcall_debug;
62266 core_param(initcall_debug, initcall_debug, bool, 0644);
62267
62268 static char msgbuf[64];
62269-static struct boot_trace_call call;
62270-static struct boot_trace_ret ret;
62271+static struct boot_trace_call trace_call;
62272+static struct boot_trace_ret trace_ret;
62273
62274 int do_one_initcall(initcall_t fn)
62275 {
62276 int count = preempt_count();
62277 ktime_t calltime, delta, rettime;
62278+ const char *msg1 = "", *msg2 = "";
62279
62280 if (initcall_debug) {
62281- call.caller = task_pid_nr(current);
62282- printk("calling %pF @ %i\n", fn, call.caller);
62283+ trace_call.caller = task_pid_nr(current);
62284+ printk("calling %pF @ %i\n", fn, trace_call.caller);
62285 calltime = ktime_get();
62286- trace_boot_call(&call, fn);
62287+ trace_boot_call(&trace_call, fn);
62288 enable_boot_trace();
62289 }
62290
62291- ret.result = fn();
62292+ trace_ret.result = fn();
62293
62294 if (initcall_debug) {
62295 disable_boot_trace();
62296 rettime = ktime_get();
62297 delta = ktime_sub(rettime, calltime);
62298- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62299- trace_boot_ret(&ret, fn);
62300+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62301+ trace_boot_ret(&trace_ret, fn);
62302 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62303- ret.result, ret.duration);
62304+ trace_ret.result, trace_ret.duration);
62305 }
62306
62307 msgbuf[0] = 0;
62308
62309- if (ret.result && ret.result != -ENODEV && initcall_debug)
62310- sprintf(msgbuf, "error code %d ", ret.result);
62311+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62312+ sprintf(msgbuf, "error code %d ", trace_ret.result);
62313
62314 if (preempt_count() != count) {
62315- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62316+ msg1 = " preemption imbalance";
62317 preempt_count() = count;
62318 }
62319 if (irqs_disabled()) {
62320- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62321+ msg2 = " disabled interrupts";
62322 local_irq_enable();
62323 }
62324- if (msgbuf[0]) {
62325- printk("initcall %pF returned with %s\n", fn, msgbuf);
62326+ if (msgbuf[0] || *msg1 || *msg2) {
62327+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62328 }
62329
62330- return ret.result;
62331+ return trace_ret.result;
62332 }
62333
62334
62335@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62336 if (!ramdisk_execute_command)
62337 ramdisk_execute_command = "/init";
62338
62339- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62340+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62341 ramdisk_execute_command = NULL;
62342 prepare_namespace();
62343 }
62344
62345+ grsecurity_init();
62346+
62347 /*
62348 * Ok, we have completed the initial bootup, and
62349 * we're essentially up and running. Get rid of the
62350diff -urNp linux-2.6.32.45/init/noinitramfs.c linux-2.6.32.45/init/noinitramfs.c
62351--- linux-2.6.32.45/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62352+++ linux-2.6.32.45/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62353@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62354 {
62355 int err;
62356
62357- err = sys_mkdir("/dev", 0755);
62358+ err = sys_mkdir((const char __user *)"/dev", 0755);
62359 if (err < 0)
62360 goto out;
62361
62362@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62363 if (err < 0)
62364 goto out;
62365
62366- err = sys_mkdir("/root", 0700);
62367+ err = sys_mkdir((const char __user *)"/root", 0700);
62368 if (err < 0)
62369 goto out;
62370
62371diff -urNp linux-2.6.32.45/ipc/mqueue.c linux-2.6.32.45/ipc/mqueue.c
62372--- linux-2.6.32.45/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62373+++ linux-2.6.32.45/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62374@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62375 mq_bytes = (mq_msg_tblsz +
62376 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62377
62378+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62379 spin_lock(&mq_lock);
62380 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62381 u->mq_bytes + mq_bytes >
62382diff -urNp linux-2.6.32.45/ipc/msg.c linux-2.6.32.45/ipc/msg.c
62383--- linux-2.6.32.45/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62384+++ linux-2.6.32.45/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62385@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62386 return security_msg_queue_associate(msq, msgflg);
62387 }
62388
62389+static struct ipc_ops msg_ops = {
62390+ .getnew = newque,
62391+ .associate = msg_security,
62392+ .more_checks = NULL
62393+};
62394+
62395 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62396 {
62397 struct ipc_namespace *ns;
62398- struct ipc_ops msg_ops;
62399 struct ipc_params msg_params;
62400
62401 ns = current->nsproxy->ipc_ns;
62402
62403- msg_ops.getnew = newque;
62404- msg_ops.associate = msg_security;
62405- msg_ops.more_checks = NULL;
62406-
62407 msg_params.key = key;
62408 msg_params.flg = msgflg;
62409
62410diff -urNp linux-2.6.32.45/ipc/sem.c linux-2.6.32.45/ipc/sem.c
62411--- linux-2.6.32.45/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62412+++ linux-2.6.32.45/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62413@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62414 return 0;
62415 }
62416
62417+static struct ipc_ops sem_ops = {
62418+ .getnew = newary,
62419+ .associate = sem_security,
62420+ .more_checks = sem_more_checks
62421+};
62422+
62423 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62424 {
62425 struct ipc_namespace *ns;
62426- struct ipc_ops sem_ops;
62427 struct ipc_params sem_params;
62428
62429 ns = current->nsproxy->ipc_ns;
62430@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62431 if (nsems < 0 || nsems > ns->sc_semmsl)
62432 return -EINVAL;
62433
62434- sem_ops.getnew = newary;
62435- sem_ops.associate = sem_security;
62436- sem_ops.more_checks = sem_more_checks;
62437-
62438 sem_params.key = key;
62439 sem_params.flg = semflg;
62440 sem_params.u.nsems = nsems;
62441@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62442 ushort* sem_io = fast_sem_io;
62443 int nsems;
62444
62445+ pax_track_stack();
62446+
62447 sma = sem_lock_check(ns, semid);
62448 if (IS_ERR(sma))
62449 return PTR_ERR(sma);
62450@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62451 unsigned long jiffies_left = 0;
62452 struct ipc_namespace *ns;
62453
62454+ pax_track_stack();
62455+
62456 ns = current->nsproxy->ipc_ns;
62457
62458 if (nsops < 1 || semid < 0)
62459diff -urNp linux-2.6.32.45/ipc/shm.c linux-2.6.32.45/ipc/shm.c
62460--- linux-2.6.32.45/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62461+++ linux-2.6.32.45/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62462@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62463 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62464 #endif
62465
62466+#ifdef CONFIG_GRKERNSEC
62467+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62468+ const time_t shm_createtime, const uid_t cuid,
62469+ const int shmid);
62470+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62471+ const time_t shm_createtime);
62472+#endif
62473+
62474 void shm_init_ns(struct ipc_namespace *ns)
62475 {
62476 ns->shm_ctlmax = SHMMAX;
62477@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62478 shp->shm_lprid = 0;
62479 shp->shm_atim = shp->shm_dtim = 0;
62480 shp->shm_ctim = get_seconds();
62481+#ifdef CONFIG_GRKERNSEC
62482+ {
62483+ struct timespec timeval;
62484+ do_posix_clock_monotonic_gettime(&timeval);
62485+
62486+ shp->shm_createtime = timeval.tv_sec;
62487+ }
62488+#endif
62489 shp->shm_segsz = size;
62490 shp->shm_nattch = 0;
62491 shp->shm_file = file;
62492@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62493 return 0;
62494 }
62495
62496+static struct ipc_ops shm_ops = {
62497+ .getnew = newseg,
62498+ .associate = shm_security,
62499+ .more_checks = shm_more_checks
62500+};
62501+
62502 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62503 {
62504 struct ipc_namespace *ns;
62505- struct ipc_ops shm_ops;
62506 struct ipc_params shm_params;
62507
62508 ns = current->nsproxy->ipc_ns;
62509
62510- shm_ops.getnew = newseg;
62511- shm_ops.associate = shm_security;
62512- shm_ops.more_checks = shm_more_checks;
62513-
62514 shm_params.key = key;
62515 shm_params.flg = shmflg;
62516 shm_params.u.size = size;
62517@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62518 if (err)
62519 goto out_unlock;
62520
62521+#ifdef CONFIG_GRKERNSEC
62522+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62523+ shp->shm_perm.cuid, shmid) ||
62524+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62525+ err = -EACCES;
62526+ goto out_unlock;
62527+ }
62528+#endif
62529+
62530 path.dentry = dget(shp->shm_file->f_path.dentry);
62531 path.mnt = shp->shm_file->f_path.mnt;
62532 shp->shm_nattch++;
62533+#ifdef CONFIG_GRKERNSEC
62534+ shp->shm_lapid = current->pid;
62535+#endif
62536 size = i_size_read(path.dentry->d_inode);
62537 shm_unlock(shp);
62538
62539diff -urNp linux-2.6.32.45/kernel/acct.c linux-2.6.32.45/kernel/acct.c
62540--- linux-2.6.32.45/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62541+++ linux-2.6.32.45/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62542@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62543 */
62544 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62545 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62546- file->f_op->write(file, (char *)&ac,
62547+ file->f_op->write(file, (__force char __user *)&ac,
62548 sizeof(acct_t), &file->f_pos);
62549 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62550 set_fs(fs);
62551diff -urNp linux-2.6.32.45/kernel/audit.c linux-2.6.32.45/kernel/audit.c
62552--- linux-2.6.32.45/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62553+++ linux-2.6.32.45/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62554@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62555 3) suppressed due to audit_rate_limit
62556 4) suppressed due to audit_backlog_limit
62557 */
62558-static atomic_t audit_lost = ATOMIC_INIT(0);
62559+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62560
62561 /* The netlink socket. */
62562 static struct sock *audit_sock;
62563@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62564 unsigned long now;
62565 int print;
62566
62567- atomic_inc(&audit_lost);
62568+ atomic_inc_unchecked(&audit_lost);
62569
62570 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62571
62572@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62573 printk(KERN_WARNING
62574 "audit: audit_lost=%d audit_rate_limit=%d "
62575 "audit_backlog_limit=%d\n",
62576- atomic_read(&audit_lost),
62577+ atomic_read_unchecked(&audit_lost),
62578 audit_rate_limit,
62579 audit_backlog_limit);
62580 audit_panic(message);
62581@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62582 status_set.pid = audit_pid;
62583 status_set.rate_limit = audit_rate_limit;
62584 status_set.backlog_limit = audit_backlog_limit;
62585- status_set.lost = atomic_read(&audit_lost);
62586+ status_set.lost = atomic_read_unchecked(&audit_lost);
62587 status_set.backlog = skb_queue_len(&audit_skb_queue);
62588 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62589 &status_set, sizeof(status_set));
62590@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62591 spin_unlock_irq(&tsk->sighand->siglock);
62592 }
62593 read_unlock(&tasklist_lock);
62594- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62595- &s, sizeof(s));
62596+
62597+ if (!err)
62598+ audit_send_reply(NETLINK_CB(skb).pid, seq,
62599+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62600 break;
62601 }
62602 case AUDIT_TTY_SET: {
62603diff -urNp linux-2.6.32.45/kernel/auditsc.c linux-2.6.32.45/kernel/auditsc.c
62604--- linux-2.6.32.45/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62605+++ linux-2.6.32.45/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62606@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62607 }
62608
62609 /* global counter which is incremented every time something logs in */
62610-static atomic_t session_id = ATOMIC_INIT(0);
62611+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62612
62613 /**
62614 * audit_set_loginuid - set a task's audit_context loginuid
62615@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62616 */
62617 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62618 {
62619- unsigned int sessionid = atomic_inc_return(&session_id);
62620+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62621 struct audit_context *context = task->audit_context;
62622
62623 if (context && context->in_syscall) {
62624diff -urNp linux-2.6.32.45/kernel/capability.c linux-2.6.32.45/kernel/capability.c
62625--- linux-2.6.32.45/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62626+++ linux-2.6.32.45/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62627@@ -305,10 +305,26 @@ int capable(int cap)
62628 BUG();
62629 }
62630
62631- if (security_capable(cap) == 0) {
62632+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62633 current->flags |= PF_SUPERPRIV;
62634 return 1;
62635 }
62636 return 0;
62637 }
62638+
62639+int capable_nolog(int cap)
62640+{
62641+ if (unlikely(!cap_valid(cap))) {
62642+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62643+ BUG();
62644+ }
62645+
62646+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62647+ current->flags |= PF_SUPERPRIV;
62648+ return 1;
62649+ }
62650+ return 0;
62651+}
62652+
62653 EXPORT_SYMBOL(capable);
62654+EXPORT_SYMBOL(capable_nolog);
62655diff -urNp linux-2.6.32.45/kernel/cgroup.c linux-2.6.32.45/kernel/cgroup.c
62656--- linux-2.6.32.45/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62657+++ linux-2.6.32.45/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62658@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62659 struct hlist_head *hhead;
62660 struct cg_cgroup_link *link;
62661
62662+ pax_track_stack();
62663+
62664 /* First see if we already have a cgroup group that matches
62665 * the desired set */
62666 read_lock(&css_set_lock);
62667diff -urNp linux-2.6.32.45/kernel/configs.c linux-2.6.32.45/kernel/configs.c
62668--- linux-2.6.32.45/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62669+++ linux-2.6.32.45/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62670@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62671 struct proc_dir_entry *entry;
62672
62673 /* create the current config file */
62674+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62675+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62676+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62677+ &ikconfig_file_ops);
62678+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62679+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62680+ &ikconfig_file_ops);
62681+#endif
62682+#else
62683 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62684 &ikconfig_file_ops);
62685+#endif
62686+
62687 if (!entry)
62688 return -ENOMEM;
62689
62690diff -urNp linux-2.6.32.45/kernel/cpu.c linux-2.6.32.45/kernel/cpu.c
62691--- linux-2.6.32.45/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62692+++ linux-2.6.32.45/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62693@@ -19,7 +19,7 @@
62694 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62695 static DEFINE_MUTEX(cpu_add_remove_lock);
62696
62697-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62698+static RAW_NOTIFIER_HEAD(cpu_chain);
62699
62700 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62701 * Should always be manipulated under cpu_add_remove_lock
62702diff -urNp linux-2.6.32.45/kernel/cred.c linux-2.6.32.45/kernel/cred.c
62703--- linux-2.6.32.45/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62704+++ linux-2.6.32.45/kernel/cred.c 2011-08-11 19:49:38.000000000 -0400
62705@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62706 */
62707 void __put_cred(struct cred *cred)
62708 {
62709+ pax_track_stack();
62710+
62711 kdebug("__put_cred(%p{%d,%d})", cred,
62712 atomic_read(&cred->usage),
62713 read_cred_subscribers(cred));
62714@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
62715 {
62716 struct cred *cred;
62717
62718+ pax_track_stack();
62719+
62720 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
62721 atomic_read(&tsk->cred->usage),
62722 read_cred_subscribers(tsk->cred));
62723@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
62724 {
62725 const struct cred *cred;
62726
62727+ pax_track_stack();
62728+
62729 rcu_read_lock();
62730
62731 do {
62732@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
62733 {
62734 struct cred *new;
62735
62736+ pax_track_stack();
62737+
62738 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
62739 if (!new)
62740 return NULL;
62741@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
62742 const struct cred *old;
62743 struct cred *new;
62744
62745+ pax_track_stack();
62746+
62747 validate_process_creds();
62748
62749 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62750@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
62751 struct thread_group_cred *tgcred = NULL;
62752 struct cred *new;
62753
62754+ pax_track_stack();
62755+
62756 #ifdef CONFIG_KEYS
62757 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
62758 if (!tgcred)
62759@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
62760 struct cred *new;
62761 int ret;
62762
62763+ pax_track_stack();
62764+
62765 mutex_init(&p->cred_guard_mutex);
62766
62767 if (
62768@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
62769 struct task_struct *task = current;
62770 const struct cred *old = task->real_cred;
62771
62772+ pax_track_stack();
62773+
62774 kdebug("commit_creds(%p{%d,%d})", new,
62775 atomic_read(&new->usage),
62776 read_cred_subscribers(new));
62777@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
62778
62779 get_cred(new); /* we will require a ref for the subj creds too */
62780
62781+ gr_set_role_label(task, new->uid, new->gid);
62782+
62783 /* dumpability changes */
62784 if (old->euid != new->euid ||
62785 old->egid != new->egid ||
62786@@ -563,10 +581,8 @@ int commit_creds(struct cred *new)
62787 key_fsgid_changed(task);
62788
62789 /* do it
62790- * - What if a process setreuid()'s and this brings the
62791- * new uid over his NPROC rlimit? We can check this now
62792- * cheaply with the new uid cache, so if it matters
62793- * we should be checking for it. -DaveM
62794+ * RLIMIT_NPROC limits on user->processes have already been checked
62795+ * in set_user().
62796 */
62797 alter_cred_subscribers(new, 2);
62798 if (new->user != old->user)
62799@@ -606,6 +622,8 @@ EXPORT_SYMBOL(commit_creds);
62800 */
62801 void abort_creds(struct cred *new)
62802 {
62803+ pax_track_stack();
62804+
62805 kdebug("abort_creds(%p{%d,%d})", new,
62806 atomic_read(&new->usage),
62807 read_cred_subscribers(new));
62808@@ -629,6 +647,8 @@ const struct cred *override_creds(const
62809 {
62810 const struct cred *old = current->cred;
62811
62812+ pax_track_stack();
62813+
62814 kdebug("override_creds(%p{%d,%d})", new,
62815 atomic_read(&new->usage),
62816 read_cred_subscribers(new));
62817@@ -658,6 +678,8 @@ void revert_creds(const struct cred *old
62818 {
62819 const struct cred *override = current->cred;
62820
62821+ pax_track_stack();
62822+
62823 kdebug("revert_creds(%p{%d,%d})", old,
62824 atomic_read(&old->usage),
62825 read_cred_subscribers(old));
62826@@ -704,6 +726,8 @@ struct cred *prepare_kernel_cred(struct
62827 const struct cred *old;
62828 struct cred *new;
62829
62830+ pax_track_stack();
62831+
62832 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
62833 if (!new)
62834 return NULL;
62835@@ -758,6 +782,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
62836 */
62837 int set_security_override(struct cred *new, u32 secid)
62838 {
62839+ pax_track_stack();
62840+
62841 return security_kernel_act_as(new, secid);
62842 }
62843 EXPORT_SYMBOL(set_security_override);
62844@@ -777,6 +803,8 @@ int set_security_override_from_ctx(struc
62845 u32 secid;
62846 int ret;
62847
62848+ pax_track_stack();
62849+
62850 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
62851 if (ret < 0)
62852 return ret;
62853diff -urNp linux-2.6.32.45/kernel/exit.c linux-2.6.32.45/kernel/exit.c
62854--- linux-2.6.32.45/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
62855+++ linux-2.6.32.45/kernel/exit.c 2011-08-17 19:19:50.000000000 -0400
62856@@ -55,6 +55,10 @@
62857 #include <asm/pgtable.h>
62858 #include <asm/mmu_context.h>
62859
62860+#ifdef CONFIG_GRKERNSEC
62861+extern rwlock_t grsec_exec_file_lock;
62862+#endif
62863+
62864 static void exit_mm(struct task_struct * tsk);
62865
62866 static void __unhash_process(struct task_struct *p)
62867@@ -174,6 +178,10 @@ void release_task(struct task_struct * p
62868 struct task_struct *leader;
62869 int zap_leader;
62870 repeat:
62871+#ifdef CONFIG_NET
62872+ gr_del_task_from_ip_table(p);
62873+#endif
62874+
62875 tracehook_prepare_release_task(p);
62876 /* don't need to get the RCU readlock here - the process is dead and
62877 * can't be modifying its own credentials */
62878@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
62879 {
62880 write_lock_irq(&tasklist_lock);
62881
62882+#ifdef CONFIG_GRKERNSEC
62883+ write_lock(&grsec_exec_file_lock);
62884+ if (current->exec_file) {
62885+ fput(current->exec_file);
62886+ current->exec_file = NULL;
62887+ }
62888+ write_unlock(&grsec_exec_file_lock);
62889+#endif
62890+
62891 ptrace_unlink(current);
62892 /* Reparent to init */
62893 current->real_parent = current->parent = kthreadd_task;
62894 list_move_tail(&current->sibling, &current->real_parent->children);
62895
62896+ gr_set_kernel_label(current);
62897+
62898 /* Set the exit signal to SIGCHLD so we signal init on exit */
62899 current->exit_signal = SIGCHLD;
62900
62901@@ -397,7 +416,7 @@ int allow_signal(int sig)
62902 * know it'll be handled, so that they don't get converted to
62903 * SIGKILL or just silently dropped.
62904 */
62905- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
62906+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
62907 recalc_sigpending();
62908 spin_unlock_irq(&current->sighand->siglock);
62909 return 0;
62910@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
62911 vsnprintf(current->comm, sizeof(current->comm), name, args);
62912 va_end(args);
62913
62914+#ifdef CONFIG_GRKERNSEC
62915+ write_lock(&grsec_exec_file_lock);
62916+ if (current->exec_file) {
62917+ fput(current->exec_file);
62918+ current->exec_file = NULL;
62919+ }
62920+ write_unlock(&grsec_exec_file_lock);
62921+#endif
62922+
62923+ gr_set_kernel_label(current);
62924+
62925 /*
62926 * If we were started as result of loading a module, close all of the
62927 * user space pages. We don't need them, and if we didn't close them
62928@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
62929 struct task_struct *tsk = current;
62930 int group_dead;
62931
62932- profile_task_exit(tsk);
62933-
62934- WARN_ON(atomic_read(&tsk->fs_excl));
62935-
62936+ /*
62937+ * Check this first since set_fs() below depends on
62938+ * current_thread_info(), which we better not access when we're in
62939+ * interrupt context. Other than that, we want to do the set_fs()
62940+ * as early as possible.
62941+ */
62942 if (unlikely(in_interrupt()))
62943 panic("Aiee, killing interrupt handler!");
62944- if (unlikely(!tsk->pid))
62945- panic("Attempted to kill the idle task!");
62946
62947 /*
62948- * If do_exit is called because this processes oopsed, it's possible
62949+ * If do_exit is called because this processes Oops'ed, it's possible
62950 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
62951 * continuing. Amongst other possible reasons, this is to prevent
62952 * mm_release()->clear_child_tid() from writing to a user-controlled
62953@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
62954 */
62955 set_fs(USER_DS);
62956
62957+ profile_task_exit(tsk);
62958+
62959+ WARN_ON(atomic_read(&tsk->fs_excl));
62960+
62961+ if (unlikely(!tsk->pid))
62962+ panic("Attempted to kill the idle task!");
62963+
62964 tracehook_report_exit(&code);
62965
62966 validate_creds_for_do_exit(tsk);
62967@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
62968 tsk->exit_code = code;
62969 taskstats_exit(tsk, group_dead);
62970
62971+ gr_acl_handle_psacct(tsk, code);
62972+ gr_acl_handle_exit();
62973+
62974 exit_mm(tsk);
62975
62976 if (group_dead)
62977@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_
62978
62979 if (unlikely(wo->wo_flags & WNOWAIT)) {
62980 int exit_code = p->exit_code;
62981- int why, status;
62982+ int why;
62983
62984 get_task_struct(p);
62985 read_unlock(&tasklist_lock);
62986diff -urNp linux-2.6.32.45/kernel/fork.c linux-2.6.32.45/kernel/fork.c
62987--- linux-2.6.32.45/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
62988+++ linux-2.6.32.45/kernel/fork.c 2011-08-11 19:50:07.000000000 -0400
62989@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
62990 *stackend = STACK_END_MAGIC; /* for overflow detection */
62991
62992 #ifdef CONFIG_CC_STACKPROTECTOR
62993- tsk->stack_canary = get_random_int();
62994+ tsk->stack_canary = pax_get_random_long();
62995 #endif
62996
62997 /* One for us, one for whoever does the "release_task()" (usually parent) */
62998@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
62999 mm->locked_vm = 0;
63000 mm->mmap = NULL;
63001 mm->mmap_cache = NULL;
63002- mm->free_area_cache = oldmm->mmap_base;
63003- mm->cached_hole_size = ~0UL;
63004+ mm->free_area_cache = oldmm->free_area_cache;
63005+ mm->cached_hole_size = oldmm->cached_hole_size;
63006 mm->map_count = 0;
63007 cpumask_clear(mm_cpumask(mm));
63008 mm->mm_rb = RB_ROOT;
63009@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
63010 tmp->vm_flags &= ~VM_LOCKED;
63011 tmp->vm_mm = mm;
63012 tmp->vm_next = tmp->vm_prev = NULL;
63013+ tmp->vm_mirror = NULL;
63014 anon_vma_link(tmp);
63015 file = tmp->vm_file;
63016 if (file) {
63017@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
63018 if (retval)
63019 goto out;
63020 }
63021+
63022+#ifdef CONFIG_PAX_SEGMEXEC
63023+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63024+ struct vm_area_struct *mpnt_m;
63025+
63026+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63027+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63028+
63029+ if (!mpnt->vm_mirror)
63030+ continue;
63031+
63032+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63033+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63034+ mpnt->vm_mirror = mpnt_m;
63035+ } else {
63036+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63037+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63038+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63039+ mpnt->vm_mirror->vm_mirror = mpnt;
63040+ }
63041+ }
63042+ BUG_ON(mpnt_m);
63043+ }
63044+#endif
63045+
63046 /* a new mm has just been created */
63047 arch_dup_mmap(oldmm, mm);
63048 retval = 0;
63049@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
63050 write_unlock(&fs->lock);
63051 return -EAGAIN;
63052 }
63053- fs->users++;
63054+ atomic_inc(&fs->users);
63055 write_unlock(&fs->lock);
63056 return 0;
63057 }
63058 tsk->fs = copy_fs_struct(fs);
63059 if (!tsk->fs)
63060 return -ENOMEM;
63061+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63062 return 0;
63063 }
63064
63065@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(
63066 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63067 #endif
63068 retval = -EAGAIN;
63069+
63070+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63071+
63072 if (atomic_read(&p->real_cred->user->processes) >=
63073 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
63074- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63075- p->real_cred->user != INIT_USER)
63076+ if (p->real_cred->user != INIT_USER &&
63077+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
63078 goto bad_fork_free;
63079 }
63080+ current->flags &= ~PF_NPROC_EXCEEDED;
63081
63082 retval = copy_creds(p, clone_flags);
63083 if (retval < 0)
63084@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(
63085 goto bad_fork_free_pid;
63086 }
63087
63088+ gr_copy_label(p);
63089+
63090 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63091 /*
63092 * Clear TID on mm_release()?
63093@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
63094 bad_fork_free:
63095 free_task(p);
63096 fork_out:
63097+ gr_log_forkfail(retval);
63098+
63099 return ERR_PTR(retval);
63100 }
63101
63102@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
63103 if (clone_flags & CLONE_PARENT_SETTID)
63104 put_user(nr, parent_tidptr);
63105
63106+ gr_handle_brute_check();
63107+
63108 if (clone_flags & CLONE_VFORK) {
63109 p->vfork_done = &vfork;
63110 init_completion(&vfork);
63111@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unsh
63112 return 0;
63113
63114 /* don't need lock here; in the worst case we'll do useless copy */
63115- if (fs->users == 1)
63116+ if (atomic_read(&fs->users) == 1)
63117 return 0;
63118
63119 *new_fsp = copy_fs_struct(fs);
63120@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63121 fs = current->fs;
63122 write_lock(&fs->lock);
63123 current->fs = new_fs;
63124- if (--fs->users)
63125+ gr_set_chroot_entries(current, &current->fs->root);
63126+ if (atomic_dec_return(&fs->users))
63127 new_fs = NULL;
63128 else
63129 new_fs = fs;
63130diff -urNp linux-2.6.32.45/kernel/futex.c linux-2.6.32.45/kernel/futex.c
63131--- linux-2.6.32.45/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
63132+++ linux-2.6.32.45/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
63133@@ -54,6 +54,7 @@
63134 #include <linux/mount.h>
63135 #include <linux/pagemap.h>
63136 #include <linux/syscalls.h>
63137+#include <linux/ptrace.h>
63138 #include <linux/signal.h>
63139 #include <linux/module.h>
63140 #include <linux/magic.h>
63141@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63142 struct page *page;
63143 int err;
63144
63145+#ifdef CONFIG_PAX_SEGMEXEC
63146+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63147+ return -EFAULT;
63148+#endif
63149+
63150 /*
63151 * The futex address must be "naturally" aligned.
63152 */
63153@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
63154 struct futex_q q;
63155 int ret;
63156
63157+ pax_track_stack();
63158+
63159 if (!bitset)
63160 return -EINVAL;
63161
63162@@ -1841,7 +1849,7 @@ retry:
63163
63164 restart = &current_thread_info()->restart_block;
63165 restart->fn = futex_wait_restart;
63166- restart->futex.uaddr = (u32 *)uaddr;
63167+ restart->futex.uaddr = uaddr;
63168 restart->futex.val = val;
63169 restart->futex.time = abs_time->tv64;
63170 restart->futex.bitset = bitset;
63171@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
63172 struct futex_q q;
63173 int res, ret;
63174
63175+ pax_track_stack();
63176+
63177 if (!bitset)
63178 return -EINVAL;
63179
63180@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63181 {
63182 struct robust_list_head __user *head;
63183 unsigned long ret;
63184+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63185 const struct cred *cred = current_cred(), *pcred;
63186+#endif
63187
63188 if (!futex_cmpxchg_enabled)
63189 return -ENOSYS;
63190@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63191 if (!p)
63192 goto err_unlock;
63193 ret = -EPERM;
63194+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63195+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63196+ goto err_unlock;
63197+#else
63198 pcred = __task_cred(p);
63199 if (cred->euid != pcred->euid &&
63200 cred->euid != pcred->uid &&
63201 !capable(CAP_SYS_PTRACE))
63202 goto err_unlock;
63203+#endif
63204 head = p->robust_list;
63205 rcu_read_unlock();
63206 }
63207@@ -2459,7 +2476,7 @@ retry:
63208 */
63209 static inline int fetch_robust_entry(struct robust_list __user **entry,
63210 struct robust_list __user * __user *head,
63211- int *pi)
63212+ unsigned int *pi)
63213 {
63214 unsigned long uentry;
63215
63216@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
63217 {
63218 u32 curval;
63219 int i;
63220+ mm_segment_t oldfs;
63221
63222 /*
63223 * This will fail and we want it. Some arch implementations do
63224@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
63225 * implementation, the non functional ones will return
63226 * -ENOSYS.
63227 */
63228+ oldfs = get_fs();
63229+ set_fs(USER_DS);
63230 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
63231+ set_fs(oldfs);
63232 if (curval == -EFAULT)
63233 futex_cmpxchg_enabled = 1;
63234
63235diff -urNp linux-2.6.32.45/kernel/futex_compat.c linux-2.6.32.45/kernel/futex_compat.c
63236--- linux-2.6.32.45/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
63237+++ linux-2.6.32.45/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
63238@@ -10,6 +10,7 @@
63239 #include <linux/compat.h>
63240 #include <linux/nsproxy.h>
63241 #include <linux/futex.h>
63242+#include <linux/ptrace.h>
63243
63244 #include <asm/uaccess.h>
63245
63246@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
63247 {
63248 struct compat_robust_list_head __user *head;
63249 unsigned long ret;
63250- const struct cred *cred = current_cred(), *pcred;
63251+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63252+ const struct cred *cred = current_cred();
63253+ const struct cred *pcred;
63254+#endif
63255
63256 if (!futex_cmpxchg_enabled)
63257 return -ENOSYS;
63258@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
63259 if (!p)
63260 goto err_unlock;
63261 ret = -EPERM;
63262+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63263+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63264+ goto err_unlock;
63265+#else
63266 pcred = __task_cred(p);
63267 if (cred->euid != pcred->euid &&
63268 cred->euid != pcred->uid &&
63269 !capable(CAP_SYS_PTRACE))
63270 goto err_unlock;
63271+#endif
63272 head = p->compat_robust_list;
63273 read_unlock(&tasklist_lock);
63274 }
63275diff -urNp linux-2.6.32.45/kernel/gcov/base.c linux-2.6.32.45/kernel/gcov/base.c
63276--- linux-2.6.32.45/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63277+++ linux-2.6.32.45/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63278@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63279 }
63280
63281 #ifdef CONFIG_MODULES
63282-static inline int within(void *addr, void *start, unsigned long size)
63283-{
63284- return ((addr >= start) && (addr < start + size));
63285-}
63286-
63287 /* Update list and generate events when modules are unloaded. */
63288 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63289 void *data)
63290@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63291 prev = NULL;
63292 /* Remove entries located in module from linked list. */
63293 for (info = gcov_info_head; info; info = info->next) {
63294- if (within(info, mod->module_core, mod->core_size)) {
63295+ if (within_module_core_rw((unsigned long)info, mod)) {
63296 if (prev)
63297 prev->next = info->next;
63298 else
63299diff -urNp linux-2.6.32.45/kernel/hrtimer.c linux-2.6.32.45/kernel/hrtimer.c
63300--- linux-2.6.32.45/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63301+++ linux-2.6.32.45/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63302@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63303 local_irq_restore(flags);
63304 }
63305
63306-static void run_hrtimer_softirq(struct softirq_action *h)
63307+static void run_hrtimer_softirq(void)
63308 {
63309 hrtimer_peek_ahead_timers();
63310 }
63311diff -urNp linux-2.6.32.45/kernel/kallsyms.c linux-2.6.32.45/kernel/kallsyms.c
63312--- linux-2.6.32.45/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63313+++ linux-2.6.32.45/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63314@@ -11,6 +11,9 @@
63315 * Changed the compression method from stem compression to "table lookup"
63316 * compression (see scripts/kallsyms.c for a more complete description)
63317 */
63318+#ifdef CONFIG_GRKERNSEC_HIDESYM
63319+#define __INCLUDED_BY_HIDESYM 1
63320+#endif
63321 #include <linux/kallsyms.h>
63322 #include <linux/module.h>
63323 #include <linux/init.h>
63324@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63325
63326 static inline int is_kernel_inittext(unsigned long addr)
63327 {
63328+ if (system_state != SYSTEM_BOOTING)
63329+ return 0;
63330+
63331 if (addr >= (unsigned long)_sinittext
63332 && addr <= (unsigned long)_einittext)
63333 return 1;
63334 return 0;
63335 }
63336
63337+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63338+#ifdef CONFIG_MODULES
63339+static inline int is_module_text(unsigned long addr)
63340+{
63341+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63342+ return 1;
63343+
63344+ addr = ktla_ktva(addr);
63345+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63346+}
63347+#else
63348+static inline int is_module_text(unsigned long addr)
63349+{
63350+ return 0;
63351+}
63352+#endif
63353+#endif
63354+
63355 static inline int is_kernel_text(unsigned long addr)
63356 {
63357 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63358@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63359
63360 static inline int is_kernel(unsigned long addr)
63361 {
63362+
63363+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63364+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63365+ return 1;
63366+
63367+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63368+#else
63369 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63370+#endif
63371+
63372 return 1;
63373 return in_gate_area_no_task(addr);
63374 }
63375
63376 static int is_ksym_addr(unsigned long addr)
63377 {
63378+
63379+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63380+ if (is_module_text(addr))
63381+ return 0;
63382+#endif
63383+
63384 if (all_var)
63385 return is_kernel(addr);
63386
63387@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63388
63389 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63390 {
63391- iter->name[0] = '\0';
63392 iter->nameoff = get_symbol_offset(new_pos);
63393 iter->pos = new_pos;
63394 }
63395@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63396 {
63397 struct kallsym_iter *iter = m->private;
63398
63399+#ifdef CONFIG_GRKERNSEC_HIDESYM
63400+ if (current_uid())
63401+ return 0;
63402+#endif
63403+
63404 /* Some debugging symbols have no name. Ignore them. */
63405 if (!iter->name[0])
63406 return 0;
63407@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63408 struct kallsym_iter *iter;
63409 int ret;
63410
63411- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63412+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63413 if (!iter)
63414 return -ENOMEM;
63415 reset_iter(iter, 0);
63416diff -urNp linux-2.6.32.45/kernel/kgdb.c linux-2.6.32.45/kernel/kgdb.c
63417--- linux-2.6.32.45/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63418+++ linux-2.6.32.45/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63419@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63420 /* Guard for recursive entry */
63421 static int exception_level;
63422
63423-static struct kgdb_io *kgdb_io_ops;
63424+static const struct kgdb_io *kgdb_io_ops;
63425 static DEFINE_SPINLOCK(kgdb_registration_lock);
63426
63427 /* kgdb console driver is loaded */
63428@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63429 */
63430 static atomic_t passive_cpu_wait[NR_CPUS];
63431 static atomic_t cpu_in_kgdb[NR_CPUS];
63432-atomic_t kgdb_setting_breakpoint;
63433+atomic_unchecked_t kgdb_setting_breakpoint;
63434
63435 struct task_struct *kgdb_usethread;
63436 struct task_struct *kgdb_contthread;
63437@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63438 sizeof(unsigned long)];
63439
63440 /* to keep track of the CPU which is doing the single stepping*/
63441-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63442+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63443
63444 /*
63445 * If you are debugging a problem where roundup (the collection of
63446@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63447 return 0;
63448 if (kgdb_connected)
63449 return 1;
63450- if (atomic_read(&kgdb_setting_breakpoint))
63451+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63452 return 1;
63453 if (print_wait)
63454 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63455@@ -1426,8 +1426,8 @@ acquirelock:
63456 * instance of the exception handler wanted to come into the
63457 * debugger on a different CPU via a single step
63458 */
63459- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63460- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63461+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63462+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63463
63464 atomic_set(&kgdb_active, -1);
63465 touch_softlockup_watchdog();
63466@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63467 *
63468 * Register it with the KGDB core.
63469 */
63470-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63471+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63472 {
63473 int err;
63474
63475@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63476 *
63477 * Unregister it with the KGDB core.
63478 */
63479-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63480+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63481 {
63482 BUG_ON(kgdb_connected);
63483
63484@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63485 */
63486 void kgdb_breakpoint(void)
63487 {
63488- atomic_set(&kgdb_setting_breakpoint, 1);
63489+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63490 wmb(); /* Sync point before breakpoint */
63491 arch_kgdb_breakpoint();
63492 wmb(); /* Sync point after breakpoint */
63493- atomic_set(&kgdb_setting_breakpoint, 0);
63494+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63495 }
63496 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63497
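
The kgdb counters converted here are flags and CPU markers, not reference counts, so the patch moves them to atomic_unchecked_t, the PaX type that opts a variable out of REFCOUNT overflow checking while keeping atomic accessors. For readers without the PaX headers, a rough stand-in for the shape of that API (not the actual per-architecture definition) is:

    /* Rough stand-in only: the real definitions live in the patched
     * atomic headers and differ per architecture.  This shows the API
     * surface the hunks above rely on, nothing more. */
    typedef struct {
        int counter;
    } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return v->counter;
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
        v->counter = i;
    }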
63498diff -urNp linux-2.6.32.45/kernel/kmod.c linux-2.6.32.45/kernel/kmod.c
63499--- linux-2.6.32.45/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63500+++ linux-2.6.32.45/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63501@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63502 * If module auto-loading support is disabled then this function
63503 * becomes a no-operation.
63504 */
63505-int __request_module(bool wait, const char *fmt, ...)
63506+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63507 {
63508- va_list args;
63509 char module_name[MODULE_NAME_LEN];
63510 unsigned int max_modprobes;
63511 int ret;
63512- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63513+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63514 static char *envp[] = { "HOME=/",
63515 "TERM=linux",
63516 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63517@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63518 if (ret)
63519 return ret;
63520
63521- va_start(args, fmt);
63522- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63523- va_end(args);
63524+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63525 if (ret >= MODULE_NAME_LEN)
63526 return -ENAMETOOLONG;
63527
63528+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63529+ if (!current_uid()) {
63530+ /* hack to workaround consolekit/udisks stupidity */
63531+ read_lock(&tasklist_lock);
63532+ if (!strcmp(current->comm, "mount") &&
63533+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63534+ read_unlock(&tasklist_lock);
63535+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63536+ return -EPERM;
63537+ }
63538+ read_unlock(&tasklist_lock);
63539+ }
63540+#endif
63541+
63542 /* If modprobe needs a service that is in a module, we get a recursive
63543 * loop. Limit the number of running kmod threads to max_threads/2 or
63544 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63545@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63546 atomic_dec(&kmod_concurrent);
63547 return ret;
63548 }
63549+
63550+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63551+{
63552+ va_list args;
63553+ int ret;
63554+
63555+ va_start(args, fmt);
63556+ ret = ____request_module(wait, module_param, fmt, args);
63557+ va_end(args);
63558+
63559+ return ret;
63560+}
63561+
63562+int __request_module(bool wait, const char *fmt, ...)
63563+{
63564+ va_list args;
63565+ int ret;
63566+
63567+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63568+ if (current_uid()) {
63569+ char module_param[MODULE_NAME_LEN];
63570+
63571+ memset(module_param, 0, sizeof(module_param));
63572+
63573+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63574+
63575+ va_start(args, fmt);
63576+ ret = ____request_module(wait, module_param, fmt, args);
63577+ va_end(args);
63578+
63579+ return ret;
63580+ }
63581+#endif
63582+
63583+ va_start(args, fmt);
63584+ ret = ____request_module(wait, NULL, fmt, args);
63585+ va_end(args);
63586+
63587+ return ret;
63588+}
63589+
63590+
63591 EXPORT_SYMBOL(__request_module);
63592 #endif /* CONFIG_MODULES */
63593
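
The kmod hunk splits __request_module() into a va_list worker so that an extra argument, the grsec_modharden_* marker appended to modprobe's argv, can be threaded through without duplicating the format handling. A self-contained userspace sketch of that split, with illustrative names (do_request/request) and a made-up marker string:

    #include <stdarg.h>
    #include <stdio.h>

    /* Worker takes a va_list plus the extra argument, mirroring
     * ____request_module(wait, module_param, fmt, ap) above. */
    static int do_request(const char *extra_arg, const char *fmt, va_list ap)
    {
        char name[64];

        vsnprintf(name, sizeof(name), fmt, ap);
        /* in the kernel: argv = { modprobe_path, "-q", "--", name, extra_arg, NULL } */
        return printf("modprobe -q -- %s %s\n", name, extra_arg ? extra_arg : "");
    }

    /* Varargs front end, the role ___request_module() plays. */
    static int request(const char *extra_arg, const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = do_request(extra_arg, fmt, ap);
        va_end(ap);
        return ret;
    }

    int main(void)
    {
        return request("grsec_modharden_normal1000_", "fs-%s", "ext4") < 0;
    }

In the kernel the marker ends up in mod->args and is parsed back out by the module loader, which is what the MODHARDEN checks in the kernel/module.c hunks further down key on.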
63594diff -urNp linux-2.6.32.45/kernel/kprobes.c linux-2.6.32.45/kernel/kprobes.c
63595--- linux-2.6.32.45/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63596+++ linux-2.6.32.45/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63597@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63598 * kernel image and loaded module images reside. This is required
63599 * so x86_64 can correctly handle the %rip-relative fixups.
63600 */
63601- kip->insns = module_alloc(PAGE_SIZE);
63602+ kip->insns = module_alloc_exec(PAGE_SIZE);
63603 if (!kip->insns) {
63604 kfree(kip);
63605 return NULL;
63606@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63607 */
63608 if (!list_is_singular(&kprobe_insn_pages)) {
63609 list_del(&kip->list);
63610- module_free(NULL, kip->insns);
63611+ module_free_exec(NULL, kip->insns);
63612 kfree(kip);
63613 }
63614 return 1;
63615@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63616 {
63617 int i, err = 0;
63618 unsigned long offset = 0, size = 0;
63619- char *modname, namebuf[128];
63620+ char *modname, namebuf[KSYM_NAME_LEN];
63621 const char *symbol_name;
63622 void *addr;
63623 struct kprobe_blackpoint *kb;
63624@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63625 const char *sym = NULL;
63626 unsigned int i = *(loff_t *) v;
63627 unsigned long offset = 0;
63628- char *modname, namebuf[128];
63629+ char *modname, namebuf[KSYM_NAME_LEN];
63630
63631 head = &kprobe_table[i];
63632 preempt_disable();
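
Besides moving the instruction slots to module_alloc_exec(), the kprobes hunk resizes the symbol-name buffers from a bare 128 to KSYM_NAME_LEN, the size kallsyms expects the name buffer to have. A sketch of that lookup with the properly sized buffer; lookup_name() is an illustrative wrapper, not a kernel symbol:

    #include <linux/kallsyms.h>

    /* Sizing namebuf with KSYM_NAME_LEN keeps the buffer in step with
     * whatever kallsyms_lookup() may write, instead of hard-coding 128. */
    static const char *lookup_name(unsigned long addr, char **modname)
    {
        static char namebuf[KSYM_NAME_LEN];
        unsigned long size, offset;

        return kallsyms_lookup(addr, &size, &offset, modname, namebuf);
    }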
63633diff -urNp linux-2.6.32.45/kernel/lockdep.c linux-2.6.32.45/kernel/lockdep.c
63634--- linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63635+++ linux-2.6.32.45/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63636@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63637 /*
63638 * Various lockdep statistics:
63639 */
63640-atomic_t chain_lookup_hits;
63641-atomic_t chain_lookup_misses;
63642-atomic_t hardirqs_on_events;
63643-atomic_t hardirqs_off_events;
63644-atomic_t redundant_hardirqs_on;
63645-atomic_t redundant_hardirqs_off;
63646-atomic_t softirqs_on_events;
63647-atomic_t softirqs_off_events;
63648-atomic_t redundant_softirqs_on;
63649-atomic_t redundant_softirqs_off;
63650-atomic_t nr_unused_locks;
63651-atomic_t nr_cyclic_checks;
63652-atomic_t nr_find_usage_forwards_checks;
63653-atomic_t nr_find_usage_backwards_checks;
63654+atomic_unchecked_t chain_lookup_hits;
63655+atomic_unchecked_t chain_lookup_misses;
63656+atomic_unchecked_t hardirqs_on_events;
63657+atomic_unchecked_t hardirqs_off_events;
63658+atomic_unchecked_t redundant_hardirqs_on;
63659+atomic_unchecked_t redundant_hardirqs_off;
63660+atomic_unchecked_t softirqs_on_events;
63661+atomic_unchecked_t softirqs_off_events;
63662+atomic_unchecked_t redundant_softirqs_on;
63663+atomic_unchecked_t redundant_softirqs_off;
63664+atomic_unchecked_t nr_unused_locks;
63665+atomic_unchecked_t nr_cyclic_checks;
63666+atomic_unchecked_t nr_find_usage_forwards_checks;
63667+atomic_unchecked_t nr_find_usage_backwards_checks;
63668 #endif
63669
63670 /*
63671@@ -577,6 +577,10 @@ static int static_obj(void *obj)
63672 int i;
63673 #endif
63674
63675+#ifdef CONFIG_PAX_KERNEXEC
63676+ start = ktla_ktva(start);
63677+#endif
63678+
63679 /*
63680 * static variable?
63681 */
63682@@ -592,8 +596,7 @@ static int static_obj(void *obj)
63683 */
63684 for_each_possible_cpu(i) {
63685 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63686- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63687- + per_cpu_offset(i);
63688+ end = start + PERCPU_ENOUGH_ROOM;
63689
63690 if ((addr >= start) && (addr < end))
63691 return 1;
63692@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63693 if (!static_obj(lock->key)) {
63694 debug_locks_off();
63695 printk("INFO: trying to register non-static key.\n");
63696+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63697 printk("the code is fine but needs lockdep annotation.\n");
63698 printk("turning off the locking correctness validator.\n");
63699 dump_stack();
63700@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63701 if (!class)
63702 return 0;
63703 }
63704- debug_atomic_inc((atomic_t *)&class->ops);
63705+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63706 if (very_verbose(class)) {
63707 printk("\nacquire class [%p] %s", class->key, class->name);
63708 if (class->name_version > 1)
63709diff -urNp linux-2.6.32.45/kernel/lockdep_internals.h linux-2.6.32.45/kernel/lockdep_internals.h
63710--- linux-2.6.32.45/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63711+++ linux-2.6.32.45/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63712@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63713 /*
63714 * Various lockdep statistics:
63715 */
63716-extern atomic_t chain_lookup_hits;
63717-extern atomic_t chain_lookup_misses;
63718-extern atomic_t hardirqs_on_events;
63719-extern atomic_t hardirqs_off_events;
63720-extern atomic_t redundant_hardirqs_on;
63721-extern atomic_t redundant_hardirqs_off;
63722-extern atomic_t softirqs_on_events;
63723-extern atomic_t softirqs_off_events;
63724-extern atomic_t redundant_softirqs_on;
63725-extern atomic_t redundant_softirqs_off;
63726-extern atomic_t nr_unused_locks;
63727-extern atomic_t nr_cyclic_checks;
63728-extern atomic_t nr_cyclic_check_recursions;
63729-extern atomic_t nr_find_usage_forwards_checks;
63730-extern atomic_t nr_find_usage_forwards_recursions;
63731-extern atomic_t nr_find_usage_backwards_checks;
63732-extern atomic_t nr_find_usage_backwards_recursions;
63733-# define debug_atomic_inc(ptr) atomic_inc(ptr)
63734-# define debug_atomic_dec(ptr) atomic_dec(ptr)
63735-# define debug_atomic_read(ptr) atomic_read(ptr)
63736+extern atomic_unchecked_t chain_lookup_hits;
63737+extern atomic_unchecked_t chain_lookup_misses;
63738+extern atomic_unchecked_t hardirqs_on_events;
63739+extern atomic_unchecked_t hardirqs_off_events;
63740+extern atomic_unchecked_t redundant_hardirqs_on;
63741+extern atomic_unchecked_t redundant_hardirqs_off;
63742+extern atomic_unchecked_t softirqs_on_events;
63743+extern atomic_unchecked_t softirqs_off_events;
63744+extern atomic_unchecked_t redundant_softirqs_on;
63745+extern atomic_unchecked_t redundant_softirqs_off;
63746+extern atomic_unchecked_t nr_unused_locks;
63747+extern atomic_unchecked_t nr_cyclic_checks;
63748+extern atomic_unchecked_t nr_cyclic_check_recursions;
63749+extern atomic_unchecked_t nr_find_usage_forwards_checks;
63750+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
63751+extern atomic_unchecked_t nr_find_usage_backwards_checks;
63752+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
63753+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
63754+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
63755+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
63756 #else
63757 # define debug_atomic_inc(ptr) do { } while (0)
63758 # define debug_atomic_dec(ptr) do { } while (0)
63759diff -urNp linux-2.6.32.45/kernel/lockdep_proc.c linux-2.6.32.45/kernel/lockdep_proc.c
63760--- linux-2.6.32.45/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
63761+++ linux-2.6.32.45/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
63762@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
63763
63764 static void print_name(struct seq_file *m, struct lock_class *class)
63765 {
63766- char str[128];
63767+ char str[KSYM_NAME_LEN];
63768 const char *name = class->name;
63769
63770 if (!name) {
63771diff -urNp linux-2.6.32.45/kernel/module.c linux-2.6.32.45/kernel/module.c
63772--- linux-2.6.32.45/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
63773+++ linux-2.6.32.45/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
63774@@ -55,6 +55,7 @@
63775 #include <linux/async.h>
63776 #include <linux/percpu.h>
63777 #include <linux/kmemleak.h>
63778+#include <linux/grsecurity.h>
63779
63780 #define CREATE_TRACE_POINTS
63781 #include <trace/events/module.h>
63782@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
63783 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
63784
63785 /* Bounds of module allocation, for speeding __module_address */
63786-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
63787+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
63788+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
63789
63790 int register_module_notifier(struct notifier_block * nb)
63791 {
63792@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
63793 return true;
63794
63795 list_for_each_entry_rcu(mod, &modules, list) {
63796- struct symsearch arr[] = {
63797+ struct symsearch modarr[] = {
63798 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
63799 NOT_GPL_ONLY, false },
63800 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
63801@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
63802 #endif
63803 };
63804
63805- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
63806+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
63807 return true;
63808 }
63809 return false;
63810@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
63811 void *ptr;
63812 int cpu;
63813
63814- if (align > PAGE_SIZE) {
63815+ if (align-1 >= PAGE_SIZE) {
63816 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
63817 name, align, PAGE_SIZE);
63818 align = PAGE_SIZE;
63819@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
63820 * /sys/module/foo/sections stuff
63821 * J. Corbet <corbet@lwn.net>
63822 */
63823-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
63824+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63825
63826 static inline bool sect_empty(const Elf_Shdr *sect)
63827 {
63828@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
63829 destroy_params(mod->kp, mod->num_kp);
63830
63831 /* This may be NULL, but that's OK */
63832- module_free(mod, mod->module_init);
63833+ module_free(mod, mod->module_init_rw);
63834+ module_free_exec(mod, mod->module_init_rx);
63835 kfree(mod->args);
63836 if (mod->percpu)
63837 percpu_modfree(mod->percpu);
63838@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
63839 percpu_modfree(mod->refptr);
63840 #endif
63841 /* Free lock-classes: */
63842- lockdep_free_key_range(mod->module_core, mod->core_size);
63843+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
63844+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
63845
63846 /* Finally, free the core (containing the module structure) */
63847- module_free(mod, mod->module_core);
63848+ module_free_exec(mod, mod->module_core_rx);
63849+ module_free(mod, mod->module_core_rw);
63850
63851 #ifdef CONFIG_MPU
63852 update_protections(current->mm);
63853@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
63854 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63855 int ret = 0;
63856 const struct kernel_symbol *ksym;
63857+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63858+ int is_fs_load = 0;
63859+ int register_filesystem_found = 0;
63860+ char *p;
63861+
63862+ p = strstr(mod->args, "grsec_modharden_fs");
63863+
63864+ if (p) {
63865+ char *endptr = p + strlen("grsec_modharden_fs");
63866+ /* copy \0 as well */
63867+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
63868+ is_fs_load = 1;
63869+ }
63870+#endif
63871+
63872
63873 for (i = 1; i < n; i++) {
63874+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63875+ const char *name = strtab + sym[i].st_name;
63876+
63877+ /* it's a real shame this will never get ripped and copied
63878+ upstream! ;(
63879+ */
63880+ if (is_fs_load && !strcmp(name, "register_filesystem"))
63881+ register_filesystem_found = 1;
63882+#endif
63883 switch (sym[i].st_shndx) {
63884 case SHN_COMMON:
63885 /* We compiled with -fno-common. These are not
63886@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
63887 strtab + sym[i].st_name, mod);
63888 /* Ok if resolved. */
63889 if (ksym) {
63890+ pax_open_kernel();
63891 sym[i].st_value = ksym->value;
63892+ pax_close_kernel();
63893 break;
63894 }
63895
63896@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
63897 secbase = (unsigned long)mod->percpu;
63898 else
63899 secbase = sechdrs[sym[i].st_shndx].sh_addr;
63900+ pax_open_kernel();
63901 sym[i].st_value += secbase;
63902+ pax_close_kernel();
63903 break;
63904 }
63905 }
63906
63907+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63908+ if (is_fs_load && !register_filesystem_found) {
63909+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
63910+ ret = -EPERM;
63911+ }
63912+#endif
63913+
63914 return ret;
63915 }
63916
63917@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
63918 || s->sh_entsize != ~0UL
63919 || strstarts(secstrings + s->sh_name, ".init"))
63920 continue;
63921- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
63922+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63923+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
63924+ else
63925+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
63926 DEBUGP("\t%s\n", secstrings + s->sh_name);
63927 }
63928- if (m == 0)
63929- mod->core_text_size = mod->core_size;
63930 }
63931
63932 DEBUGP("Init section allocation order:\n");
63933@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
63934 || s->sh_entsize != ~0UL
63935 || !strstarts(secstrings + s->sh_name, ".init"))
63936 continue;
63937- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
63938- | INIT_OFFSET_MASK);
63939+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
63940+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
63941+ else
63942+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
63943+ s->sh_entsize |= INIT_OFFSET_MASK;
63944 DEBUGP("\t%s\n", secstrings + s->sh_name);
63945 }
63946- if (m == 0)
63947- mod->init_text_size = mod->init_size;
63948 }
63949 }
63950
63951@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
63952
63953 /* As per nm */
63954 static char elf_type(const Elf_Sym *sym,
63955- Elf_Shdr *sechdrs,
63956- const char *secstrings,
63957- struct module *mod)
63958+ const Elf_Shdr *sechdrs,
63959+ const char *secstrings)
63960 {
63961 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
63962 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
63963@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
63964
63965 /* Put symbol section at end of init part of module. */
63966 symsect->sh_flags |= SHF_ALLOC;
63967- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
63968+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
63969 symindex) | INIT_OFFSET_MASK;
63970 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
63971
63972@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
63973 }
63974
63975 /* Append room for core symbols at end of core part. */
63976- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
63977- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
63978+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
63979+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
63980
63981 /* Put string table section at end of init part of module. */
63982 strsect->sh_flags |= SHF_ALLOC;
63983- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
63984+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
63985 strindex) | INIT_OFFSET_MASK;
63986 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
63987
63988 /* Append room for core symbols' strings at end of core part. */
63989- *pstroffs = mod->core_size;
63990+ *pstroffs = mod->core_size_rx;
63991 __set_bit(0, strmap);
63992- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
63993+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
63994
63995 return symoffs;
63996 }
63997@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
63998 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
63999 mod->strtab = (void *)sechdrs[strindex].sh_addr;
64000
64001+ pax_open_kernel();
64002+
64003 /* Set types up while we still have access to sections. */
64004 for (i = 0; i < mod->num_symtab; i++)
64005 mod->symtab[i].st_info
64006- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
64007+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
64008
64009- mod->core_symtab = dst = mod->module_core + symoffs;
64010+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
64011 src = mod->symtab;
64012 *dst = *src;
64013 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64014@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
64015 }
64016 mod->core_num_syms = ndst;
64017
64018- mod->core_strtab = s = mod->module_core + stroffs;
64019+ mod->core_strtab = s = mod->module_core_rx + stroffs;
64020 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
64021 if (test_bit(i, strmap))
64022 *++s = mod->strtab[i];
64023+
64024+ pax_close_kernel();
64025 }
64026 #else
64027 static inline unsigned long layout_symtab(struct module *mod,
64028@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
64029 #endif
64030 }
64031
64032-static void *module_alloc_update_bounds(unsigned long size)
64033+static void *module_alloc_update_bounds_rw(unsigned long size)
64034 {
64035 void *ret = module_alloc(size);
64036
64037 if (ret) {
64038 /* Update module bounds. */
64039- if ((unsigned long)ret < module_addr_min)
64040- module_addr_min = (unsigned long)ret;
64041- if ((unsigned long)ret + size > module_addr_max)
64042- module_addr_max = (unsigned long)ret + size;
64043+ if ((unsigned long)ret < module_addr_min_rw)
64044+ module_addr_min_rw = (unsigned long)ret;
64045+ if ((unsigned long)ret + size > module_addr_max_rw)
64046+ module_addr_max_rw = (unsigned long)ret + size;
64047+ }
64048+ return ret;
64049+}
64050+
64051+static void *module_alloc_update_bounds_rx(unsigned long size)
64052+{
64053+ void *ret = module_alloc_exec(size);
64054+
64055+ if (ret) {
64056+ /* Update module bounds. */
64057+ if ((unsigned long)ret < module_addr_min_rx)
64058+ module_addr_min_rx = (unsigned long)ret;
64059+ if ((unsigned long)ret + size > module_addr_max_rx)
64060+ module_addr_max_rx = (unsigned long)ret + size;
64061 }
64062 return ret;
64063 }
64064@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
64065 unsigned int i;
64066
64067 /* only scan the sections containing data */
64068- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
64069- (unsigned long)mod->module_core,
64070+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
64071+ (unsigned long)mod->module_core_rw,
64072 sizeof(struct module), GFP_KERNEL);
64073
64074 for (i = 1; i < hdr->e_shnum; i++) {
64075@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
64076 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
64077 continue;
64078
64079- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
64080- (unsigned long)mod->module_core,
64081+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
64082+ (unsigned long)mod->module_core_rw,
64083 sechdrs[i].sh_size, GFP_KERNEL);
64084 }
64085 }
64086@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
64087 secstrings, &stroffs, strmap);
64088
64089 /* Do the allocs. */
64090- ptr = module_alloc_update_bounds(mod->core_size);
64091+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64092 /*
64093 * The pointer to this block is stored in the module structure
64094 * which is inside the block. Just mark it as not being a
64095@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
64096 err = -ENOMEM;
64097 goto free_percpu;
64098 }
64099- memset(ptr, 0, mod->core_size);
64100- mod->module_core = ptr;
64101+ memset(ptr, 0, mod->core_size_rw);
64102+ mod->module_core_rw = ptr;
64103
64104- ptr = module_alloc_update_bounds(mod->init_size);
64105+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64106 /*
64107 * The pointer to this block is stored in the module structure
64108 * which is inside the block. This block doesn't need to be
64109 * scanned as it contains data and code that will be freed
64110 * after the module is initialized.
64111 */
64112- kmemleak_ignore(ptr);
64113- if (!ptr && mod->init_size) {
64114+ kmemleak_not_leak(ptr);
64115+ if (!ptr && mod->init_size_rw) {
64116+ err = -ENOMEM;
64117+ goto free_core_rw;
64118+ }
64119+ memset(ptr, 0, mod->init_size_rw);
64120+ mod->module_init_rw = ptr;
64121+
64122+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64123+ kmemleak_not_leak(ptr);
64124+ if (!ptr) {
64125 err = -ENOMEM;
64126- goto free_core;
64127+ goto free_init_rw;
64128 }
64129- memset(ptr, 0, mod->init_size);
64130- mod->module_init = ptr;
64131+
64132+ pax_open_kernel();
64133+ memset(ptr, 0, mod->core_size_rx);
64134+ pax_close_kernel();
64135+ mod->module_core_rx = ptr;
64136+
64137+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64138+ kmemleak_not_leak(ptr);
64139+ if (!ptr && mod->init_size_rx) {
64140+ err = -ENOMEM;
64141+ goto free_core_rx;
64142+ }
64143+
64144+ pax_open_kernel();
64145+ memset(ptr, 0, mod->init_size_rx);
64146+ pax_close_kernel();
64147+ mod->module_init_rx = ptr;
64148
64149 /* Transfer each section which specifies SHF_ALLOC */
64150 DEBUGP("final section addresses:\n");
64151@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
64152 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
64153 continue;
64154
64155- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
64156- dest = mod->module_init
64157- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64158- else
64159- dest = mod->module_core + sechdrs[i].sh_entsize;
64160+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
64161+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
64162+ dest = mod->module_init_rw
64163+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64164+ else
64165+ dest = mod->module_init_rx
64166+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64167+ } else {
64168+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
64169+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
64170+ else
64171+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
64172+ }
64173+
64174+ if (sechdrs[i].sh_type != SHT_NOBITS) {
64175
64176- if (sechdrs[i].sh_type != SHT_NOBITS)
64177- memcpy(dest, (void *)sechdrs[i].sh_addr,
64178- sechdrs[i].sh_size);
64179+#ifdef CONFIG_PAX_KERNEXEC
64180+#ifdef CONFIG_X86_64
64181+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
64182+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64183+#endif
64184+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
64185+ pax_open_kernel();
64186+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64187+ pax_close_kernel();
64188+ } else
64189+#endif
64190+
64191+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64192+ }
64193 /* Update sh_addr to point to copy in image. */
64194- sechdrs[i].sh_addr = (unsigned long)dest;
64195+
64196+#ifdef CONFIG_PAX_KERNEXEC
64197+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
64198+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
64199+ else
64200+#endif
64201+
64202+ sechdrs[i].sh_addr = (unsigned long)dest;
64203 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
64204 }
64205 /* Module has been moved. */
64206@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
64207 mod->name);
64208 if (!mod->refptr) {
64209 err = -ENOMEM;
64210- goto free_init;
64211+ goto free_init_rx;
64212 }
64213 #endif
64214 /* Now we've moved module, initialize linked lists, etc. */
64215@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
64216 /* Set up MODINFO_ATTR fields */
64217 setup_modinfo(mod, sechdrs, infoindex);
64218
64219+ mod->args = args;
64220+
64221+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64222+ {
64223+ char *p, *p2;
64224+
64225+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64226+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64227+ err = -EPERM;
64228+ goto cleanup;
64229+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64230+ p += strlen("grsec_modharden_normal");
64231+ p2 = strstr(p, "_");
64232+ if (p2) {
64233+ *p2 = '\0';
64234+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64235+ *p2 = '_';
64236+ }
64237+ err = -EPERM;
64238+ goto cleanup;
64239+ }
64240+ }
64241+#endif
64242+
64243+
64244 /* Fix up syms, so that st_value is a pointer to location. */
64245 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
64246 mod);
64247@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
64248
64249 /* Now do relocations. */
64250 for (i = 1; i < hdr->e_shnum; i++) {
64251- const char *strtab = (char *)sechdrs[strindex].sh_addr;
64252 unsigned int info = sechdrs[i].sh_info;
64253+ strtab = (char *)sechdrs[strindex].sh_addr;
64254
64255 /* Not a valid relocation section? */
64256 if (info >= hdr->e_shnum)
64257@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
64258 * Do it before processing of module parameters, so the module
64259 * can provide parameter accessor functions of its own.
64260 */
64261- if (mod->module_init)
64262- flush_icache_range((unsigned long)mod->module_init,
64263- (unsigned long)mod->module_init
64264- + mod->init_size);
64265- flush_icache_range((unsigned long)mod->module_core,
64266- (unsigned long)mod->module_core + mod->core_size);
64267+ if (mod->module_init_rx)
64268+ flush_icache_range((unsigned long)mod->module_init_rx,
64269+ (unsigned long)mod->module_init_rx
64270+ + mod->init_size_rx);
64271+ flush_icache_range((unsigned long)mod->module_core_rx,
64272+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64273
64274 set_fs(old_fs);
64275
64276- mod->args = args;
64277 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64278 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64279 mod->name);
64280@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64281 free_unload:
64282 module_unload_free(mod);
64283 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64284+ free_init_rx:
64285 percpu_modfree(mod->refptr);
64286- free_init:
64287 #endif
64288- module_free(mod, mod->module_init);
64289- free_core:
64290- module_free(mod, mod->module_core);
64291+ module_free_exec(mod, mod->module_init_rx);
64292+ free_core_rx:
64293+ module_free_exec(mod, mod->module_core_rx);
64294+ free_init_rw:
64295+ module_free(mod, mod->module_init_rw);
64296+ free_core_rw:
64297+ module_free(mod, mod->module_core_rw);
64298 /* mod will be freed with core. Don't access it beyond this line! */
64299 free_percpu:
64300 if (percpu)
64301@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64302 mod->symtab = mod->core_symtab;
64303 mod->strtab = mod->core_strtab;
64304 #endif
64305- module_free(mod, mod->module_init);
64306- mod->module_init = NULL;
64307- mod->init_size = 0;
64308- mod->init_text_size = 0;
64309+ module_free(mod, mod->module_init_rw);
64310+ module_free_exec(mod, mod->module_init_rx);
64311+ mod->module_init_rw = NULL;
64312+ mod->module_init_rx = NULL;
64313+ mod->init_size_rw = 0;
64314+ mod->init_size_rx = 0;
64315 mutex_unlock(&module_mutex);
64316
64317 return 0;
64318@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64319 unsigned long nextval;
64320
64321 /* At worse, next value is at end of module */
64322- if (within_module_init(addr, mod))
64323- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64324+ if (within_module_init_rx(addr, mod))
64325+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64326+ else if (within_module_init_rw(addr, mod))
64327+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64328+ else if (within_module_core_rx(addr, mod))
64329+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64330+ else if (within_module_core_rw(addr, mod))
64331+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64332 else
64333- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64334+ return NULL;
64335
64336 /* Scan for closest preceeding symbol, and next symbol. (ELF
64337 starts real symbols at 1). */
64338@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64339 char buf[8];
64340
64341 seq_printf(m, "%s %u",
64342- mod->name, mod->init_size + mod->core_size);
64343+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64344 print_unload_info(m, mod);
64345
64346 /* Informative for users. */
64347@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64348 mod->state == MODULE_STATE_COMING ? "Loading":
64349 "Live");
64350 /* Used by oprofile and other similar tools. */
64351- seq_printf(m, " 0x%p", mod->module_core);
64352+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64353
64354 /* Taints info */
64355 if (mod->taints)
64356@@ -2981,7 +3128,17 @@ static const struct file_operations proc
64357
64358 static int __init proc_modules_init(void)
64359 {
64360+#ifndef CONFIG_GRKERNSEC_HIDESYM
64361+#ifdef CONFIG_GRKERNSEC_PROC_USER
64362+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64363+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64364+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64365+#else
64366 proc_create("modules", 0, NULL, &proc_modules_operations);
64367+#endif
64368+#else
64369+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64370+#endif
64371 return 0;
64372 }
64373 module_init(proc_modules_init);
64374@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64375 {
64376 struct module *mod;
64377
64378- if (addr < module_addr_min || addr > module_addr_max)
64379+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64380+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64381 return NULL;
64382
64383 list_for_each_entry_rcu(mod, &modules, list)
64384- if (within_module_core(addr, mod)
64385- || within_module_init(addr, mod))
64386+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64387 return mod;
64388 return NULL;
64389 }
64390@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64391 */
64392 struct module *__module_text_address(unsigned long addr)
64393 {
64394- struct module *mod = __module_address(addr);
64395+ struct module *mod;
64396+
64397+#ifdef CONFIG_X86_32
64398+ addr = ktla_ktva(addr);
64399+#endif
64400+
64401+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64402+ return NULL;
64403+
64404+ mod = __module_address(addr);
64405+
64406 if (mod) {
64407 /* Make sure it's within the text section. */
64408- if (!within(addr, mod->module_init, mod->init_text_size)
64409- && !within(addr, mod->module_core, mod->core_text_size))
64410+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64411 mod = NULL;
64412 }
64413 return mod;
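
The module-loader hunks above split each module into separate RW and RX regions, and the same ELF-flag test decides, in layout_sections() and again in the SHF_ALLOC copy loop, which region a section lands in. Pulled out as a helper (the helper name is made up; the flag test is the one the patch uses), the rule is:

    #include <elf.h>
    #include <stdbool.h>

    /* Writable sections and non-allocated sections go to the RW mapping;
     * everything else (read-only and/or executable) goes to the RX one. */
    static bool section_goes_to_rw(const Elf64_Shdr *s)
    {
        return (s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC);
    }

Sections routed to the RX region are only ever written inside pax_open_kernel()/pax_close_kernel() windows, which is why the memcpy and the st_value fixups in the hunks are bracketed by those calls.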
64414diff -urNp linux-2.6.32.45/kernel/mutex.c linux-2.6.32.45/kernel/mutex.c
64415--- linux-2.6.32.45/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64416+++ linux-2.6.32.45/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64417@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64418 */
64419
64420 for (;;) {
64421- struct thread_info *owner;
64422+ struct task_struct *owner;
64423
64424 /*
64425 * If we own the BKL, then don't spin. The owner of
64426@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64427 spin_lock_mutex(&lock->wait_lock, flags);
64428
64429 debug_mutex_lock_common(lock, &waiter);
64430- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64431+ debug_mutex_add_waiter(lock, &waiter, task);
64432
64433 /* add waiting tasks to the end of the waitqueue (FIFO): */
64434 list_add_tail(&waiter.list, &lock->wait_list);
64435@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64436 * TASK_UNINTERRUPTIBLE case.)
64437 */
64438 if (unlikely(signal_pending_state(state, task))) {
64439- mutex_remove_waiter(lock, &waiter,
64440- task_thread_info(task));
64441+ mutex_remove_waiter(lock, &waiter, task);
64442 mutex_release(&lock->dep_map, 1, ip);
64443 spin_unlock_mutex(&lock->wait_lock, flags);
64444
64445@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64446 done:
64447 lock_acquired(&lock->dep_map, ip);
64448 /* got the lock - rejoice! */
64449- mutex_remove_waiter(lock, &waiter, current_thread_info());
64450+ mutex_remove_waiter(lock, &waiter, task);
64451 mutex_set_owner(lock);
64452
64453 /* set it to 0 if there are no waiters left: */
64454diff -urNp linux-2.6.32.45/kernel/mutex-debug.c linux-2.6.32.45/kernel/mutex-debug.c
64455--- linux-2.6.32.45/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64456+++ linux-2.6.32.45/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64457@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64458 }
64459
64460 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64461- struct thread_info *ti)
64462+ struct task_struct *task)
64463 {
64464 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64465
64466 /* Mark the current thread as blocked on the lock: */
64467- ti->task->blocked_on = waiter;
64468+ task->blocked_on = waiter;
64469 }
64470
64471 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64472- struct thread_info *ti)
64473+ struct task_struct *task)
64474 {
64475 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64476- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64477- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64478- ti->task->blocked_on = NULL;
64479+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64480+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64481+ task->blocked_on = NULL;
64482
64483 list_del_init(&waiter->list);
64484 waiter->task = NULL;
64485@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64486 return;
64487
64488 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64489- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64490+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
64491 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64492 mutex_clear_owner(lock);
64493 }
64494diff -urNp linux-2.6.32.45/kernel/mutex-debug.h linux-2.6.32.45/kernel/mutex-debug.h
64495--- linux-2.6.32.45/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64496+++ linux-2.6.32.45/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64497@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64498 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64499 extern void debug_mutex_add_waiter(struct mutex *lock,
64500 struct mutex_waiter *waiter,
64501- struct thread_info *ti);
64502+ struct task_struct *task);
64503 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64504- struct thread_info *ti);
64505+ struct task_struct *task);
64506 extern void debug_mutex_unlock(struct mutex *lock);
64507 extern void debug_mutex_init(struct mutex *lock, const char *name,
64508 struct lock_class_key *key);
64509
64510 static inline void mutex_set_owner(struct mutex *lock)
64511 {
64512- lock->owner = current_thread_info();
64513+ lock->owner = current;
64514 }
64515
64516 static inline void mutex_clear_owner(struct mutex *lock)
64517diff -urNp linux-2.6.32.45/kernel/mutex.h linux-2.6.32.45/kernel/mutex.h
64518--- linux-2.6.32.45/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64519+++ linux-2.6.32.45/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64520@@ -19,7 +19,7 @@
64521 #ifdef CONFIG_SMP
64522 static inline void mutex_set_owner(struct mutex *lock)
64523 {
64524- lock->owner = current_thread_info();
64525+ lock->owner = current;
64526 }
64527
64528 static inline void mutex_clear_owner(struct mutex *lock)
64529diff -urNp linux-2.6.32.45/kernel/panic.c linux-2.6.32.45/kernel/panic.c
64530--- linux-2.6.32.45/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64531+++ linux-2.6.32.45/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64532@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64533 const char *board;
64534
64535 printk(KERN_WARNING "------------[ cut here ]------------\n");
64536- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64537+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64538 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64539 if (board)
64540 printk(KERN_WARNING "Hardware name: %s\n", board);
64541@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64542 */
64543 void __stack_chk_fail(void)
64544 {
64545- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64546+ dump_stack();
64547+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64548 __builtin_return_address(0));
64549 }
64550 EXPORT_SYMBOL(__stack_chk_fail);
64551diff -urNp linux-2.6.32.45/kernel/params.c linux-2.6.32.45/kernel/params.c
64552--- linux-2.6.32.45/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64553+++ linux-2.6.32.45/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64554@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64555 return ret;
64556 }
64557
64558-static struct sysfs_ops module_sysfs_ops = {
64559+static const struct sysfs_ops module_sysfs_ops = {
64560 .show = module_attr_show,
64561 .store = module_attr_store,
64562 };
64563@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64564 return 0;
64565 }
64566
64567-static struct kset_uevent_ops module_uevent_ops = {
64568+static const struct kset_uevent_ops module_uevent_ops = {
64569 .filter = uevent_filter,
64570 };
64571
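
The params.c change is part of the broader constification effort: ops tables that only hold function pointers are declared const so they land in read-only memory and cannot be retargeted by a stray kernel write. A minimal sketch of the before/after on a hypothetical table (demo_sysfs_ops and its members are not kernel symbols):

    #include <stddef.h>

    struct demo_sysfs_ops {
        long (*show)(char *buf);
        long (*store)(const char *buf, size_t count);
    };

    static long demo_show(char *buf)                      { (void)buf; return 0; }
    static long demo_store(const char *buf, size_t count) { (void)buf; return count; }

    /* before: static struct demo_sysfs_ops demo_ops = { ... };  (writable data) */
    static const struct demo_sysfs_ops demo_ops = {       /* now read-only */
        .show  = demo_show,
        .store = demo_store,
    };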
64572diff -urNp linux-2.6.32.45/kernel/perf_event.c linux-2.6.32.45/kernel/perf_event.c
64573--- linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:35:30.000000000 -0400
64574+++ linux-2.6.32.45/kernel/perf_event.c 2011-08-09 18:34:01.000000000 -0400
64575@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64576 */
64577 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64578
64579-static atomic64_t perf_event_id;
64580+static atomic64_unchecked_t perf_event_id;
64581
64582 /*
64583 * Lock for (sysadmin-configurable) event reservations:
64584@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64585 * In order to keep per-task stats reliable we need to flip the event
64586 * values when we flip the contexts.
64587 */
64588- value = atomic64_read(&next_event->count);
64589- value = atomic64_xchg(&event->count, value);
64590- atomic64_set(&next_event->count, value);
64591+ value = atomic64_read_unchecked(&next_event->count);
64592+ value = atomic64_xchg_unchecked(&event->count, value);
64593+ atomic64_set_unchecked(&next_event->count, value);
64594
64595 swap(event->total_time_enabled, next_event->total_time_enabled);
64596 swap(event->total_time_running, next_event->total_time_running);
64597@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64598 update_event_times(event);
64599 }
64600
64601- return atomic64_read(&event->count);
64602+ return atomic64_read_unchecked(&event->count);
64603 }
64604
64605 /*
64606@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64607 values[n++] = 1 + leader->nr_siblings;
64608 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64609 values[n++] = leader->total_time_enabled +
64610- atomic64_read(&leader->child_total_time_enabled);
64611+ atomic64_read_unchecked(&leader->child_total_time_enabled);
64612 }
64613 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64614 values[n++] = leader->total_time_running +
64615- atomic64_read(&leader->child_total_time_running);
64616+ atomic64_read_unchecked(&leader->child_total_time_running);
64617 }
64618
64619 size = n * sizeof(u64);
64620@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64621 values[n++] = perf_event_read_value(event);
64622 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64623 values[n++] = event->total_time_enabled +
64624- atomic64_read(&event->child_total_time_enabled);
64625+ atomic64_read_unchecked(&event->child_total_time_enabled);
64626 }
64627 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64628 values[n++] = event->total_time_running +
64629- atomic64_read(&event->child_total_time_running);
64630+ atomic64_read_unchecked(&event->child_total_time_running);
64631 }
64632 if (read_format & PERF_FORMAT_ID)
64633 values[n++] = primary_event_id(event);
64634@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64635 static void perf_event_reset(struct perf_event *event)
64636 {
64637 (void)perf_event_read(event);
64638- atomic64_set(&event->count, 0);
64639+ atomic64_set_unchecked(&event->count, 0);
64640 perf_event_update_userpage(event);
64641 }
64642
64643@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64644 ++userpg->lock;
64645 barrier();
64646 userpg->index = perf_event_index(event);
64647- userpg->offset = atomic64_read(&event->count);
64648+ userpg->offset = atomic64_read_unchecked(&event->count);
64649 if (event->state == PERF_EVENT_STATE_ACTIVE)
64650- userpg->offset -= atomic64_read(&event->hw.prev_count);
64651+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64652
64653 userpg->time_enabled = event->total_time_enabled +
64654- atomic64_read(&event->child_total_time_enabled);
64655+ atomic64_read_unchecked(&event->child_total_time_enabled);
64656
64657 userpg->time_running = event->total_time_running +
64658- atomic64_read(&event->child_total_time_running);
64659+ atomic64_read_unchecked(&event->child_total_time_running);
64660
64661 barrier();
64662 ++userpg->lock;
64663@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64664 u64 values[4];
64665 int n = 0;
64666
64667- values[n++] = atomic64_read(&event->count);
64668+ values[n++] = atomic64_read_unchecked(&event->count);
64669 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64670 values[n++] = event->total_time_enabled +
64671- atomic64_read(&event->child_total_time_enabled);
64672+ atomic64_read_unchecked(&event->child_total_time_enabled);
64673 }
64674 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64675 values[n++] = event->total_time_running +
64676- atomic64_read(&event->child_total_time_running);
64677+ atomic64_read_unchecked(&event->child_total_time_running);
64678 }
64679 if (read_format & PERF_FORMAT_ID)
64680 values[n++] = primary_event_id(event);
64681@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64682 if (leader != event)
64683 leader->pmu->read(leader);
64684
64685- values[n++] = atomic64_read(&leader->count);
64686+ values[n++] = atomic64_read_unchecked(&leader->count);
64687 if (read_format & PERF_FORMAT_ID)
64688 values[n++] = primary_event_id(leader);
64689
64690@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64691 if (sub != event)
64692 sub->pmu->read(sub);
64693
64694- values[n++] = atomic64_read(&sub->count);
64695+ values[n++] = atomic64_read_unchecked(&sub->count);
64696 if (read_format & PERF_FORMAT_ID)
64697 values[n++] = primary_event_id(sub);
64698
64699@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf
64700 {
64701 struct hw_perf_event *hwc = &event->hw;
64702
64703- atomic64_add(nr, &event->count);
64704+ atomic64_add_unchecked(nr, &event->count);
64705
64706 if (!hwc->sample_period)
64707 return;
64708@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(
64709 u64 now;
64710
64711 now = cpu_clock(cpu);
64712- prev = atomic64_read(&event->hw.prev_count);
64713- atomic64_set(&event->hw.prev_count, now);
64714- atomic64_add(now - prev, &event->count);
64715+ prev = atomic64_read_unchecked(&event->hw.prev_count);
64716+ atomic64_set_unchecked(&event->hw.prev_count, now);
64717+ atomic64_add_unchecked(now - prev, &event->count);
64718 }
64719
64720 static int cpu_clock_perf_event_enable(struct perf_event *event)
64721@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s
64722 struct hw_perf_event *hwc = &event->hw;
64723 int cpu = raw_smp_processor_id();
64724
64725- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
64726+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
64727 perf_swevent_start_hrtimer(event);
64728
64729 return 0;
64730@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update
64731 u64 prev;
64732 s64 delta;
64733
64734- prev = atomic64_xchg(&event->hw.prev_count, now);
64735+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
64736 delta = now - prev;
64737- atomic64_add(delta, &event->count);
64738+ atomic64_add_unchecked(delta, &event->count);
64739 }
64740
64741 static int task_clock_perf_event_enable(struct perf_event *event)
64742@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(
64743
64744 now = event->ctx->time;
64745
64746- atomic64_set(&hwc->prev_count, now);
64747+ atomic64_set_unchecked(&hwc->prev_count, now);
64748
64749 perf_swevent_start_hrtimer(event);
64750
64751@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr
64752 event->parent = parent_event;
64753
64754 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64755- event->id = atomic64_inc_return(&perf_event_id);
64756+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
64757
64758 event->state = PERF_EVENT_STATE_INACTIVE;
64759
64760@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf
64761 if (child_event->attr.inherit_stat)
64762 perf_event_read_event(child_event, child);
64763
64764- child_val = atomic64_read(&child_event->count);
64765+ child_val = atomic64_read_unchecked(&child_event->count);
64766
64767 /*
64768 * Add back the child's count to the parent's count:
64769 */
64770- atomic64_add(child_val, &parent_event->count);
64771- atomic64_add(child_event->total_time_enabled,
64772+ atomic64_add_unchecked(child_val, &parent_event->count);
64773+ atomic64_add_unchecked(child_event->total_time_enabled,
64774 &parent_event->child_total_time_enabled);
64775- atomic64_add(child_event->total_time_running,
64776+ atomic64_add_unchecked(child_event->total_time_running,
64777 &parent_event->child_total_time_running);
64778
64779 /*
64780diff -urNp linux-2.6.32.45/kernel/pid.c linux-2.6.32.45/kernel/pid.c
64781--- linux-2.6.32.45/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
64782+++ linux-2.6.32.45/kernel/pid.c 2011-08-21 19:11:29.000000000 -0400
64783@@ -33,6 +33,7 @@
64784 #include <linux/rculist.h>
64785 #include <linux/bootmem.h>
64786 #include <linux/hash.h>
64787+#include <linux/security.h>
64788 #include <linux/pid_namespace.h>
64789 #include <linux/init_task.h>
64790 #include <linux/syscalls.h>
64791@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
64792
64793 int pid_max = PID_MAX_DEFAULT;
64794
64795-#define RESERVED_PIDS 300
64796+#define RESERVED_PIDS 500
64797
64798 int pid_max_min = RESERVED_PIDS + 1;
64799 int pid_max_max = PID_MAX_LIMIT;
64800@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
64801 */
64802 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
64803 {
64804- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64805+ struct task_struct *task;
64806+
64807+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
64808+
64809+ if (gr_pid_is_chrooted(task))
64810+ return NULL;
64811+
64812+ return task;
64813 }
64814
64815 struct task_struct *find_task_by_vpid(pid_t vnr)
64816@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
64817 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
64818 }
64819
64820+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
64821+{
64822+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
64823+}
64824+
64825 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
64826 {
64827 struct pid *pid;
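
The pid.c hunk makes the common task lookup honour gr_pid_is_chrooted(), so a chrooted process cannot resolve pids outside its chroot, and adds find_task_by_vpid_unrestricted() for the few internal callers that still need the raw lookup. A self-contained sketch of that split; struct task, raw_lookup() and policy_denies() are stand-ins for the kernel types and helpers:

    #include <stddef.h>

    struct task { int pid; };

    static struct task *raw_lookup(int nr)
    {
        static struct task t;
        t.pid = nr;
        return &t;
    }

    static int policy_denies(const struct task *t)
    {
        return t->pid == 1;    /* pretend pid 1 lives outside the caller's chroot */
    }

    /* Common entry point: hide tasks the policy denies. */
    static struct task *filtered_lookup(int nr)
    {
        struct task *t = raw_lookup(nr);

        if (t && policy_denies(t))
            return NULL;
        return t;
    }

    /* Unrestricted variant keeps the raw behaviour for trusted callers. */
    static struct task *unrestricted_lookup(int nr)
    {
        return raw_lookup(nr);
    }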
64828diff -urNp linux-2.6.32.45/kernel/posix-cpu-timers.c linux-2.6.32.45/kernel/posix-cpu-timers.c
64829--- linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
64830+++ linux-2.6.32.45/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
64831@@ -6,6 +6,7 @@
64832 #include <linux/posix-timers.h>
64833 #include <linux/errno.h>
64834 #include <linux/math64.h>
64835+#include <linux/security.h>
64836 #include <asm/uaccess.h>
64837 #include <linux/kernel_stat.h>
64838 #include <trace/events/timer.h>
64839@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
64840
64841 static __init int init_posix_cpu_timers(void)
64842 {
64843- struct k_clock process = {
64844+ static struct k_clock process = {
64845 .clock_getres = process_cpu_clock_getres,
64846 .clock_get = process_cpu_clock_get,
64847 .clock_set = do_posix_clock_nosettime,
64848@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
64849 .nsleep = process_cpu_nsleep,
64850 .nsleep_restart = process_cpu_nsleep_restart,
64851 };
64852- struct k_clock thread = {
64853+ static struct k_clock thread = {
64854 .clock_getres = thread_cpu_clock_getres,
64855 .clock_get = thread_cpu_clock_get,
64856 .clock_set = do_posix_clock_nosettime,
64857diff -urNp linux-2.6.32.45/kernel/posix-timers.c linux-2.6.32.45/kernel/posix-timers.c
64858--- linux-2.6.32.45/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
64859+++ linux-2.6.32.45/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
64860@@ -42,6 +42,7 @@
64861 #include <linux/compiler.h>
64862 #include <linux/idr.h>
64863 #include <linux/posix-timers.h>
64864+#include <linux/grsecurity.h>
64865 #include <linux/syscalls.h>
64866 #include <linux/wait.h>
64867 #include <linux/workqueue.h>
64868@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
64869 * which we beg off on and pass to do_sys_settimeofday().
64870 */
64871
64872-static struct k_clock posix_clocks[MAX_CLOCKS];
64873+static struct k_clock *posix_clocks[MAX_CLOCKS];
64874
64875 /*
64876 * These ones are defined below.
64877@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
64878 */
64879 #define CLOCK_DISPATCH(clock, call, arglist) \
64880 ((clock) < 0 ? posix_cpu_##call arglist : \
64881- (posix_clocks[clock].call != NULL \
64882- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
64883+ (posix_clocks[clock]->call != NULL \
64884+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
64885
64886 /*
64887 * Default clock hook functions when the struct k_clock passed
64888@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
64889 struct timespec *tp)
64890 {
64891 tp->tv_sec = 0;
64892- tp->tv_nsec = posix_clocks[which_clock].res;
64893+ tp->tv_nsec = posix_clocks[which_clock]->res;
64894 return 0;
64895 }
64896
64897@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
64898 return 0;
64899 if ((unsigned) which_clock >= MAX_CLOCKS)
64900 return 1;
64901- if (posix_clocks[which_clock].clock_getres != NULL)
64902+ if (!posix_clocks[which_clock])
64903 return 0;
64904- if (posix_clocks[which_clock].res != 0)
64905+ if (posix_clocks[which_clock]->clock_getres != NULL)
64906+ return 0;
64907+ if (posix_clocks[which_clock]->res != 0)
64908 return 0;
64909 return 1;
64910 }
64911@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
64912 */
64913 static __init int init_posix_timers(void)
64914 {
64915- struct k_clock clock_realtime = {
64916+ static struct k_clock clock_realtime = {
64917 .clock_getres = hrtimer_get_res,
64918 };
64919- struct k_clock clock_monotonic = {
64920+ static struct k_clock clock_monotonic = {
64921 .clock_getres = hrtimer_get_res,
64922 .clock_get = posix_ktime_get_ts,
64923 .clock_set = do_posix_clock_nosettime,
64924 };
64925- struct k_clock clock_monotonic_raw = {
64926+ static struct k_clock clock_monotonic_raw = {
64927 .clock_getres = hrtimer_get_res,
64928 .clock_get = posix_get_monotonic_raw,
64929 .clock_set = do_posix_clock_nosettime,
64930 .timer_create = no_timer_create,
64931 .nsleep = no_nsleep,
64932 };
64933- struct k_clock clock_realtime_coarse = {
64934+ static struct k_clock clock_realtime_coarse = {
64935 .clock_getres = posix_get_coarse_res,
64936 .clock_get = posix_get_realtime_coarse,
64937 .clock_set = do_posix_clock_nosettime,
64938 .timer_create = no_timer_create,
64939 .nsleep = no_nsleep,
64940 };
64941- struct k_clock clock_monotonic_coarse = {
64942+ static struct k_clock clock_monotonic_coarse = {
64943 .clock_getres = posix_get_coarse_res,
64944 .clock_get = posix_get_monotonic_coarse,
64945 .clock_set = do_posix_clock_nosettime,
64946@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
64947 .nsleep = no_nsleep,
64948 };
64949
64950+ pax_track_stack();
64951+
64952 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
64953 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
64954 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
64955@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
64956 return;
64957 }
64958
64959- posix_clocks[clock_id] = *new_clock;
64960+ posix_clocks[clock_id] = new_clock;
64961 }
64962 EXPORT_SYMBOL_GPL(register_posix_clock);
64963
64964@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
64965 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
64966 return -EFAULT;
64967
64968+	/* Only the CLOCK_REALTIME clock can be set; all other clocks
64969+	   have their clock_set fptr set to a nosettime dummy function.
64970+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
64971+	   call common_clock_set, which calls do_sys_settimeofday, which
64972+	   we hook.
64973+	*/
64974+
64975 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
64976 }
64977
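Because posix_clocks[] now holds pointers, a slot that was never registered is a NULL pointer rather than a zero-filled structure, so invalid_clockid() gains an explicit NULL test before it dereferences the entry. A minimal sketch of the reworked check, mirroring the hunk above:

	static inline int invalid_clockid(const clockid_t which_clock)
	{
		if (which_clock < 0)	/* CPU clocks use negative ids; checked elsewhere */
			return 0;
		if ((unsigned) which_clock >= MAX_CLOCKS)
			return 1;
		if (!posix_clocks[which_clock])	/* may be NULL now: test before use */
			return 0;
		if (posix_clocks[which_clock]->clock_getres != NULL)
			return 0;
		if (posix_clocks[which_clock]->res != 0)
			return 0;
		return 1;
	}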
64978diff -urNp linux-2.6.32.45/kernel/power/hibernate.c linux-2.6.32.45/kernel/power/hibernate.c
64979--- linux-2.6.32.45/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
64980+++ linux-2.6.32.45/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
64981@@ -48,14 +48,14 @@ enum {
64982
64983 static int hibernation_mode = HIBERNATION_SHUTDOWN;
64984
64985-static struct platform_hibernation_ops *hibernation_ops;
64986+static const struct platform_hibernation_ops *hibernation_ops;
64987
64988 /**
64989 * hibernation_set_ops - set the global hibernate operations
64990 * @ops: the hibernation operations to use in subsequent hibernation transitions
64991 */
64992
64993-void hibernation_set_ops(struct platform_hibernation_ops *ops)
64994+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
64995 {
64996 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
64997 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
64998diff -urNp linux-2.6.32.45/kernel/power/poweroff.c linux-2.6.32.45/kernel/power/poweroff.c
64999--- linux-2.6.32.45/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
65000+++ linux-2.6.32.45/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
65001@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
65002 .enable_mask = SYSRQ_ENABLE_BOOT,
65003 };
65004
65005-static int pm_sysrq_init(void)
65006+static int __init pm_sysrq_init(void)
65007 {
65008 register_sysrq_key('o', &sysrq_poweroff_op);
65009 return 0;
65010diff -urNp linux-2.6.32.45/kernel/power/process.c linux-2.6.32.45/kernel/power/process.c
65011--- linux-2.6.32.45/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
65012+++ linux-2.6.32.45/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
65013@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
65014 struct timeval start, end;
65015 u64 elapsed_csecs64;
65016 unsigned int elapsed_csecs;
65017+ bool timedout = false;
65018
65019 do_gettimeofday(&start);
65020
65021 end_time = jiffies + TIMEOUT;
65022 do {
65023 todo = 0;
65024+ if (time_after(jiffies, end_time))
65025+ timedout = true;
65026 read_lock(&tasklist_lock);
65027 do_each_thread(g, p) {
65028 if (frozen(p) || !freezeable(p))
65029@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
65030 * It is "frozen enough". If the task does wake
65031 * up, it will immediately call try_to_freeze.
65032 */
65033- if (!task_is_stopped_or_traced(p) &&
65034- !freezer_should_skip(p))
65035+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65036 todo++;
65037+ if (timedout) {
65038+ printk(KERN_ERR "Task refusing to freeze:\n");
65039+ sched_show_task(p);
65040+ }
65041+ }
65042 } while_each_thread(g, p);
65043 read_unlock(&tasklist_lock);
65044 yield(); /* Yield is okay here */
65045- if (time_after(jiffies, end_time))
65046- break;
65047- } while (todo);
65048+ } while (todo && !timedout);
65049
65050 do_gettimeofday(&end);
65051 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
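The freezer loop is restructured so that hitting the timeout no longer breaks out silently: the timedout flag is set at the top of an iteration, the task list is walked one last time, and every task still refusing to freeze is reported via sched_show_task() before the loop terminates. A skeleton of the resulting control flow, with the per-task freezing work elided:

	bool timedout = false;
	unsigned int todo;

	do {
		todo = 0;
		if (time_after(jiffies, end_time))
			timedout = true;	/* final pass: keep counting, but also report */

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			/* ... attempt to freeze p; if it is still not frozen:
			 *         todo++;
			 *         if (timedout) sched_show_task(p);
			 */
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		yield();
	} while (todo && !timedout);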
65052diff -urNp linux-2.6.32.45/kernel/power/suspend.c linux-2.6.32.45/kernel/power/suspend.c
65053--- linux-2.6.32.45/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
65054+++ linux-2.6.32.45/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
65055@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
65056 [PM_SUSPEND_MEM] = "mem",
65057 };
65058
65059-static struct platform_suspend_ops *suspend_ops;
65060+static const struct platform_suspend_ops *suspend_ops;
65061
65062 /**
65063 * suspend_set_ops - Set the global suspend method table.
65064 * @ops: Pointer to ops structure.
65065 */
65066-void suspend_set_ops(struct platform_suspend_ops *ops)
65067+void suspend_set_ops(const struct platform_suspend_ops *ops)
65068 {
65069 mutex_lock(&pm_mutex);
65070 suspend_ops = ops;
65071diff -urNp linux-2.6.32.45/kernel/printk.c linux-2.6.32.45/kernel/printk.c
65072--- linux-2.6.32.45/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
65073+++ linux-2.6.32.45/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
65074@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
65075 char c;
65076 int error = 0;
65077
65078+#ifdef CONFIG_GRKERNSEC_DMESG
65079+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
65080+ return -EPERM;
65081+#endif
65082+
65083 error = security_syslog(type);
65084 if (error)
65085 return error;
65086diff -urNp linux-2.6.32.45/kernel/profile.c linux-2.6.32.45/kernel/profile.c
65087--- linux-2.6.32.45/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
65088+++ linux-2.6.32.45/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
65089@@ -39,7 +39,7 @@ struct profile_hit {
65090 /* Oprofile timer tick hook */
65091 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65092
65093-static atomic_t *prof_buffer;
65094+static atomic_unchecked_t *prof_buffer;
65095 static unsigned long prof_len, prof_shift;
65096
65097 int prof_on __read_mostly;
65098@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
65099 hits[i].pc = 0;
65100 continue;
65101 }
65102- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65103+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65104 hits[i].hits = hits[i].pc = 0;
65105 }
65106 }
65107@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
65108 * Add the current hit(s) and flush the write-queue out
65109 * to the global buffer:
65110 */
65111- atomic_add(nr_hits, &prof_buffer[pc]);
65112+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65113 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65114- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65115+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65116 hits[i].pc = hits[i].hits = 0;
65117 }
65118 out:
65119@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
65120 if (prof_on != type || !prof_buffer)
65121 return;
65122 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65123- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65124+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65125 }
65126 #endif /* !CONFIG_SMP */
65127 EXPORT_SYMBOL_GPL(profile_hits);
65128@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
65129 return -EFAULT;
65130 buf++; p++; count--; read++;
65131 }
65132- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65133+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65134 if (copy_to_user(buf, (void *)pnt, count))
65135 return -EFAULT;
65136 read += count;
65137@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
65138 }
65139 #endif
65140 profile_discard_flip_buffers();
65141- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65142+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65143 return count;
65144 }
65145
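prof_buffer only accumulates profiling statistics, so its counters are moved to atomic_unchecked_t, the PaX type this patch introduces for values allowed to wrap without triggering the overflow detection applied to ordinary atomic_t; the sizeof() uses must switch in step so the buffer indexing stays consistent. A minimal sketch of the pattern, assuming the _unchecked API defined elsewhere in this patch:

	/* a statistics counter that may legitimately wrap around */
	static atomic_unchecked_t hit_count;

	static void record_hit(void)
	{
		atomic_inc_unchecked(&hit_count);	/* no saturation or oops on overflow */
	}

	static int read_hits(void)
	{
		return atomic_read_unchecked(&hit_count);
	}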
65146diff -urNp linux-2.6.32.45/kernel/ptrace.c linux-2.6.32.45/kernel/ptrace.c
65147--- linux-2.6.32.45/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
65148+++ linux-2.6.32.45/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
65149@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
65150 return ret;
65151 }
65152
65153-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65154+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65155+ unsigned int log)
65156 {
65157 const struct cred *cred = current_cred(), *tcred;
65158
65159@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
65160 cred->gid != tcred->egid ||
65161 cred->gid != tcred->sgid ||
65162 cred->gid != tcred->gid) &&
65163- !capable(CAP_SYS_PTRACE)) {
65164+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65165+ (log && !capable(CAP_SYS_PTRACE)))
65166+ ) {
65167 rcu_read_unlock();
65168 return -EPERM;
65169 }
65170@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
65171 smp_rmb();
65172 if (task->mm)
65173 dumpable = get_dumpable(task->mm);
65174- if (!dumpable && !capable(CAP_SYS_PTRACE))
65175+ if (!dumpable &&
65176+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65177+ (log && !capable(CAP_SYS_PTRACE))))
65178 return -EPERM;
65179
65180 return security_ptrace_access_check(task, mode);
65181@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
65182 {
65183 int err;
65184 task_lock(task);
65185- err = __ptrace_may_access(task, mode);
65186+ err = __ptrace_may_access(task, mode, 0);
65187+ task_unlock(task);
65188+ return !err;
65189+}
65190+
65191+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65192+{
65193+ int err;
65194+ task_lock(task);
65195+ err = __ptrace_may_access(task, mode, 1);
65196 task_unlock(task);
65197 return !err;
65198 }
65199@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
65200 goto out;
65201
65202 task_lock(task);
65203- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65204+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65205 task_unlock(task);
65206 if (retval)
65207 goto unlock_creds;
65208@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
65209 goto unlock_tasklist;
65210
65211 task->ptrace = PT_PTRACED;
65212- if (capable(CAP_SYS_PTRACE))
65213+ if (capable_nolog(CAP_SYS_PTRACE))
65214 task->ptrace |= PT_PTRACE_CAP;
65215
65216 __ptrace_link(task, current);
65217@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
65218 {
65219 int copied = 0;
65220
65221+ pax_track_stack();
65222+
65223 while (len > 0) {
65224 char buf[128];
65225 int this_len, retval;
65226@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
65227 {
65228 int copied = 0;
65229
65230+ pax_track_stack();
65231+
65232 while (len > 0) {
65233 char buf[128];
65234 int this_len, retval;
65235@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
65236 int ret = -EIO;
65237 siginfo_t siginfo;
65238
65239+ pax_track_stack();
65240+
65241 switch (request) {
65242 case PTRACE_PEEKTEXT:
65243 case PTRACE_PEEKDATA:
65244@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
65245 ret = ptrace_setoptions(child, data);
65246 break;
65247 case PTRACE_GETEVENTMSG:
65248- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
65249+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
65250 break;
65251
65252 case PTRACE_GETSIGINFO:
65253 ret = ptrace_getsiginfo(child, &siginfo);
65254 if (!ret)
65255- ret = copy_siginfo_to_user((siginfo_t __user *) data,
65256+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
65257 &siginfo);
65258 break;
65259
65260 case PTRACE_SETSIGINFO:
65261- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
65262+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
65263 sizeof siginfo))
65264 ret = -EFAULT;
65265 else
65266@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65267 goto out;
65268 }
65269
65270+ if (gr_handle_ptrace(child, request)) {
65271+ ret = -EPERM;
65272+ goto out_put_task_struct;
65273+ }
65274+
65275 if (request == PTRACE_ATTACH) {
65276 ret = ptrace_attach(child);
65277 /*
65278 * Some architectures need to do book-keeping after
65279 * a ptrace attach.
65280 */
65281- if (!ret)
65282+ if (!ret) {
65283 arch_ptrace_attach(child);
65284+ gr_audit_ptrace(child);
65285+ }
65286 goto out_put_task_struct;
65287 }
65288
65289@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65290 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65291 if (copied != sizeof(tmp))
65292 return -EIO;
65293- return put_user(tmp, (unsigned long __user *)data);
65294+ return put_user(tmp, (__force unsigned long __user *)data);
65295 }
65296
65297 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65298@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65299 siginfo_t siginfo;
65300 int ret;
65301
65302+ pax_track_stack();
65303+
65304 switch (request) {
65305 case PTRACE_PEEKTEXT:
65306 case PTRACE_PEEKDATA:
65307@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65308 goto out;
65309 }
65310
65311+ if (gr_handle_ptrace(child, request)) {
65312+ ret = -EPERM;
65313+ goto out_put_task_struct;
65314+ }
65315+
65316 if (request == PTRACE_ATTACH) {
65317 ret = ptrace_attach(child);
65318 /*
65319 * Some architectures need to do book-keeping after
65320 * a ptrace attach.
65321 */
65322- if (!ret)
65323+ if (!ret) {
65324 arch_ptrace_attach(child);
65325+ gr_audit_ptrace(child);
65326+ }
65327 goto out_put_task_struct;
65328 }
65329
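__ptrace_may_access() gains a log parameter so that purely informational probes made through ptrace_may_access() use capable_nolog() and stay out of the grsecurity audit log, while the new ptrace_may_access_log() wrapper and the PTRACE_ATTACH path use capable() and are logged; attaches are additionally vetted by gr_handle_ptrace() and recorded with gr_audit_ptrace(). A minimal sketch of how a caller would choose between the two wrappers, assuming the helpers as added above:

	/* informational check, e.g. before exposing data under /proc: stay quiet */
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		return -EPERM;

	/* a real attach attempt: use the logging variant so denials are audited */
	if (!ptrace_may_access_log(task, PTRACE_MODE_ATTACH))
		return -EPERM;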
65330diff -urNp linux-2.6.32.45/kernel/rcutorture.c linux-2.6.32.45/kernel/rcutorture.c
65331--- linux-2.6.32.45/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65332+++ linux-2.6.32.45/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65333@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65334 { 0 };
65335 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65336 { 0 };
65337-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65338-static atomic_t n_rcu_torture_alloc;
65339-static atomic_t n_rcu_torture_alloc_fail;
65340-static atomic_t n_rcu_torture_free;
65341-static atomic_t n_rcu_torture_mberror;
65342-static atomic_t n_rcu_torture_error;
65343+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65344+static atomic_unchecked_t n_rcu_torture_alloc;
65345+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65346+static atomic_unchecked_t n_rcu_torture_free;
65347+static atomic_unchecked_t n_rcu_torture_mberror;
65348+static atomic_unchecked_t n_rcu_torture_error;
65349 static long n_rcu_torture_timers;
65350 static struct list_head rcu_torture_removed;
65351 static cpumask_var_t shuffle_tmp_mask;
65352@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65353
65354 spin_lock_bh(&rcu_torture_lock);
65355 if (list_empty(&rcu_torture_freelist)) {
65356- atomic_inc(&n_rcu_torture_alloc_fail);
65357+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65358 spin_unlock_bh(&rcu_torture_lock);
65359 return NULL;
65360 }
65361- atomic_inc(&n_rcu_torture_alloc);
65362+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65363 p = rcu_torture_freelist.next;
65364 list_del_init(p);
65365 spin_unlock_bh(&rcu_torture_lock);
65366@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65367 static void
65368 rcu_torture_free(struct rcu_torture *p)
65369 {
65370- atomic_inc(&n_rcu_torture_free);
65371+ atomic_inc_unchecked(&n_rcu_torture_free);
65372 spin_lock_bh(&rcu_torture_lock);
65373 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65374 spin_unlock_bh(&rcu_torture_lock);
65375@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65376 i = rp->rtort_pipe_count;
65377 if (i > RCU_TORTURE_PIPE_LEN)
65378 i = RCU_TORTURE_PIPE_LEN;
65379- atomic_inc(&rcu_torture_wcount[i]);
65380+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65381 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65382 rp->rtort_mbtest = 0;
65383 rcu_torture_free(rp);
65384@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65385 i = rp->rtort_pipe_count;
65386 if (i > RCU_TORTURE_PIPE_LEN)
65387 i = RCU_TORTURE_PIPE_LEN;
65388- atomic_inc(&rcu_torture_wcount[i]);
65389+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65390 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65391 rp->rtort_mbtest = 0;
65392 list_del(&rp->rtort_free);
65393@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65394 i = old_rp->rtort_pipe_count;
65395 if (i > RCU_TORTURE_PIPE_LEN)
65396 i = RCU_TORTURE_PIPE_LEN;
65397- atomic_inc(&rcu_torture_wcount[i]);
65398+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65399 old_rp->rtort_pipe_count++;
65400 cur_ops->deferred_free(old_rp);
65401 }
65402@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65403 return;
65404 }
65405 if (p->rtort_mbtest == 0)
65406- atomic_inc(&n_rcu_torture_mberror);
65407+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65408 spin_lock(&rand_lock);
65409 cur_ops->read_delay(&rand);
65410 n_rcu_torture_timers++;
65411@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65412 continue;
65413 }
65414 if (p->rtort_mbtest == 0)
65415- atomic_inc(&n_rcu_torture_mberror);
65416+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65417 cur_ops->read_delay(&rand);
65418 preempt_disable();
65419 pipe_count = p->rtort_pipe_count;
65420@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65421 rcu_torture_current,
65422 rcu_torture_current_version,
65423 list_empty(&rcu_torture_freelist),
65424- atomic_read(&n_rcu_torture_alloc),
65425- atomic_read(&n_rcu_torture_alloc_fail),
65426- atomic_read(&n_rcu_torture_free),
65427- atomic_read(&n_rcu_torture_mberror),
65428+ atomic_read_unchecked(&n_rcu_torture_alloc),
65429+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65430+ atomic_read_unchecked(&n_rcu_torture_free),
65431+ atomic_read_unchecked(&n_rcu_torture_mberror),
65432 n_rcu_torture_timers);
65433- if (atomic_read(&n_rcu_torture_mberror) != 0)
65434+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65435 cnt += sprintf(&page[cnt], " !!!");
65436 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65437 if (i > 1) {
65438 cnt += sprintf(&page[cnt], "!!! ");
65439- atomic_inc(&n_rcu_torture_error);
65440+ atomic_inc_unchecked(&n_rcu_torture_error);
65441 WARN_ON_ONCE(1);
65442 }
65443 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65444@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65445 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65446 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65447 cnt += sprintf(&page[cnt], " %d",
65448- atomic_read(&rcu_torture_wcount[i]));
65449+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65450 }
65451 cnt += sprintf(&page[cnt], "\n");
65452 if (cur_ops->stats)
65453@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65454
65455 if (cur_ops->cleanup)
65456 cur_ops->cleanup();
65457- if (atomic_read(&n_rcu_torture_error))
65458+ if (atomic_read_unchecked(&n_rcu_torture_error))
65459 rcu_torture_print_module_parms("End of test: FAILURE");
65460 else
65461 rcu_torture_print_module_parms("End of test: SUCCESS");
65462@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65463
65464 rcu_torture_current = NULL;
65465 rcu_torture_current_version = 0;
65466- atomic_set(&n_rcu_torture_alloc, 0);
65467- atomic_set(&n_rcu_torture_alloc_fail, 0);
65468- atomic_set(&n_rcu_torture_free, 0);
65469- atomic_set(&n_rcu_torture_mberror, 0);
65470- atomic_set(&n_rcu_torture_error, 0);
65471+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65472+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65473+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65474+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65475+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65476 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65477- atomic_set(&rcu_torture_wcount[i], 0);
65478+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65479 for_each_possible_cpu(cpu) {
65480 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65481 per_cpu(rcu_torture_count, cpu)[i] = 0;
65482diff -urNp linux-2.6.32.45/kernel/rcutree.c linux-2.6.32.45/kernel/rcutree.c
65483--- linux-2.6.32.45/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65484+++ linux-2.6.32.45/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65485@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65486 /*
65487 * Do softirq processing for the current CPU.
65488 */
65489-static void rcu_process_callbacks(struct softirq_action *unused)
65490+static void rcu_process_callbacks(void)
65491 {
65492 /*
65493 * Memory references from any prior RCU read-side critical sections
65494diff -urNp linux-2.6.32.45/kernel/rcutree_plugin.h linux-2.6.32.45/kernel/rcutree_plugin.h
65495--- linux-2.6.32.45/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65496+++ linux-2.6.32.45/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65497@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65498 */
65499 void __rcu_read_lock(void)
65500 {
65501- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65502+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65503 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65504 }
65505 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65506@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65507 struct task_struct *t = current;
65508
65509 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65510- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65511+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65512 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65513 rcu_read_unlock_special(t);
65514 }
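The nesting-counter updates switch to ACCESS_ONCE_RW() because PaX redefines ACCESS_ONCE() elsewhere in this patch to yield a const-qualified lvalue, catching accidental writes at compile time; places that genuinely need a volatile write use the _RW variant. Roughly, the assumed split looks like this sketch:

	#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))	/* reads only   */
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* allows writes */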
65515diff -urNp linux-2.6.32.45/kernel/relay.c linux-2.6.32.45/kernel/relay.c
65516--- linux-2.6.32.45/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65517+++ linux-2.6.32.45/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65518@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65519 unsigned int flags,
65520 int *nonpad_ret)
65521 {
65522- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65523+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65524 struct rchan_buf *rbuf = in->private_data;
65525 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65526 uint64_t pos = (uint64_t) *ppos;
65527@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65528 .ops = &relay_pipe_buf_ops,
65529 .spd_release = relay_page_release,
65530 };
65531+ ssize_t ret;
65532+
65533+ pax_track_stack();
65534
65535 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65536 return 0;
65537diff -urNp linux-2.6.32.45/kernel/resource.c linux-2.6.32.45/kernel/resource.c
65538--- linux-2.6.32.45/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65539+++ linux-2.6.32.45/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65540@@ -132,8 +132,18 @@ static const struct file_operations proc
65541
65542 static int __init ioresources_init(void)
65543 {
65544+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65545+#ifdef CONFIG_GRKERNSEC_PROC_USER
65546+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65547+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65548+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65549+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65550+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65551+#endif
65552+#else
65553 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65554 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65555+#endif
65556 return 0;
65557 }
65558 __initcall(ioresources_init);
65559diff -urNp linux-2.6.32.45/kernel/rtmutex.c linux-2.6.32.45/kernel/rtmutex.c
65560--- linux-2.6.32.45/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65561+++ linux-2.6.32.45/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65562@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65563 */
65564 spin_lock_irqsave(&pendowner->pi_lock, flags);
65565
65566- WARN_ON(!pendowner->pi_blocked_on);
65567+ BUG_ON(!pendowner->pi_blocked_on);
65568 WARN_ON(pendowner->pi_blocked_on != waiter);
65569 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65570
65571diff -urNp linux-2.6.32.45/kernel/rtmutex-tester.c linux-2.6.32.45/kernel/rtmutex-tester.c
65572--- linux-2.6.32.45/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65573+++ linux-2.6.32.45/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65574@@ -21,7 +21,7 @@
65575 #define MAX_RT_TEST_MUTEXES 8
65576
65577 static spinlock_t rttest_lock;
65578-static atomic_t rttest_event;
65579+static atomic_unchecked_t rttest_event;
65580
65581 struct test_thread_data {
65582 int opcode;
65583@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65584
65585 case RTTEST_LOCKCONT:
65586 td->mutexes[td->opdata] = 1;
65587- td->event = atomic_add_return(1, &rttest_event);
65588+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65589 return 0;
65590
65591 case RTTEST_RESET:
65592@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65593 return 0;
65594
65595 case RTTEST_RESETEVENT:
65596- atomic_set(&rttest_event, 0);
65597+ atomic_set_unchecked(&rttest_event, 0);
65598 return 0;
65599
65600 default:
65601@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65602 return ret;
65603
65604 td->mutexes[id] = 1;
65605- td->event = atomic_add_return(1, &rttest_event);
65606+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65607 rt_mutex_lock(&mutexes[id]);
65608- td->event = atomic_add_return(1, &rttest_event);
65609+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65610 td->mutexes[id] = 4;
65611 return 0;
65612
65613@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65614 return ret;
65615
65616 td->mutexes[id] = 1;
65617- td->event = atomic_add_return(1, &rttest_event);
65618+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65619 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65620- td->event = atomic_add_return(1, &rttest_event);
65621+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65622 td->mutexes[id] = ret ? 0 : 4;
65623 return ret ? -EINTR : 0;
65624
65625@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65626 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65627 return ret;
65628
65629- td->event = atomic_add_return(1, &rttest_event);
65630+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65631 rt_mutex_unlock(&mutexes[id]);
65632- td->event = atomic_add_return(1, &rttest_event);
65633+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65634 td->mutexes[id] = 0;
65635 return 0;
65636
65637@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65638 break;
65639
65640 td->mutexes[dat] = 2;
65641- td->event = atomic_add_return(1, &rttest_event);
65642+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65643 break;
65644
65645 case RTTEST_LOCKBKL:
65646@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65647 return;
65648
65649 td->mutexes[dat] = 3;
65650- td->event = atomic_add_return(1, &rttest_event);
65651+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65652 break;
65653
65654 case RTTEST_LOCKNOWAIT:
65655@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65656 return;
65657
65658 td->mutexes[dat] = 1;
65659- td->event = atomic_add_return(1, &rttest_event);
65660+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65661 return;
65662
65663 case RTTEST_LOCKBKL:
65664diff -urNp linux-2.6.32.45/kernel/sched.c linux-2.6.32.45/kernel/sched.c
65665--- linux-2.6.32.45/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65666+++ linux-2.6.32.45/kernel/sched.c 2011-08-21 19:29:25.000000000 -0400
65667@@ -2764,9 +2764,10 @@ void wake_up_new_task(struct task_struct
65668 {
65669 unsigned long flags;
65670 struct rq *rq;
65671- int cpu = get_cpu();
65672
65673 #ifdef CONFIG_SMP
65674+ int cpu = get_cpu();
65675+
65676 rq = task_rq_lock(p, &flags);
65677 p->state = TASK_WAKING;
65678
65679@@ -5043,7 +5044,7 @@ out:
65680 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65681 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65682 */
65683-static void run_rebalance_domains(struct softirq_action *h)
65684+static void run_rebalance_domains(void)
65685 {
65686 int this_cpu = smp_processor_id();
65687 struct rq *this_rq = cpu_rq(this_cpu);
65688@@ -5700,6 +5701,8 @@ asmlinkage void __sched schedule(void)
65689 struct rq *rq;
65690 int cpu;
65691
65692+ pax_track_stack();
65693+
65694 need_resched:
65695 preempt_disable();
65696 cpu = smp_processor_id();
65697@@ -5770,7 +5773,7 @@ EXPORT_SYMBOL(schedule);
65698 * Look out! "owner" is an entirely speculative pointer
65699 * access and not reliable.
65700 */
65701-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65702+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65703 {
65704 unsigned int cpu;
65705 struct rq *rq;
65706@@ -5784,10 +5787,10 @@ int mutex_spin_on_owner(struct mutex *lo
65707 * DEBUG_PAGEALLOC could have unmapped it if
65708 * the mutex owner just released it and exited.
65709 */
65710- if (probe_kernel_address(&owner->cpu, cpu))
65711+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65712 return 0;
65713 #else
65714- cpu = owner->cpu;
65715+ cpu = task_thread_info(owner)->cpu;
65716 #endif
65717
65718 /*
65719@@ -5816,7 +5819,7 @@ int mutex_spin_on_owner(struct mutex *lo
65720 /*
65721 * Is that owner really running on that cpu?
65722 */
65723- if (task_thread_info(rq->curr) != owner || need_resched())
65724+ if (rq->curr != owner || need_resched())
65725 return 0;
65726
65727 cpu_relax();
65728@@ -6359,6 +6362,8 @@ int can_nice(const struct task_struct *p
65729 /* convert nice value [19,-20] to rlimit style value [1,40] */
65730 int nice_rlim = 20 - nice;
65731
65732+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65733+
65734 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
65735 capable(CAP_SYS_NICE));
65736 }
65737@@ -6392,7 +6397,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65738 if (nice > 19)
65739 nice = 19;
65740
65741- if (increment < 0 && !can_nice(current, nice))
65742+ if (increment < 0 && (!can_nice(current, nice) ||
65743+ gr_handle_chroot_nice()))
65744 return -EPERM;
65745
65746 retval = security_task_setnice(current, nice);
65747@@ -8774,7 +8780,7 @@ static void init_sched_groups_power(int
65748 long power;
65749 int weight;
65750
65751- WARN_ON(!sd || !sd->groups);
65752+ BUG_ON(!sd || !sd->groups);
65753
65754 if (cpu != group_first_cpu(sd->groups))
65755 return;
65756diff -urNp linux-2.6.32.45/kernel/signal.c linux-2.6.32.45/kernel/signal.c
65757--- linux-2.6.32.45/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
65758+++ linux-2.6.32.45/kernel/signal.c 2011-08-16 21:15:58.000000000 -0400
65759@@ -41,12 +41,12 @@
65760
65761 static struct kmem_cache *sigqueue_cachep;
65762
65763-static void __user *sig_handler(struct task_struct *t, int sig)
65764+static __sighandler_t sig_handler(struct task_struct *t, int sig)
65765 {
65766 return t->sighand->action[sig - 1].sa.sa_handler;
65767 }
65768
65769-static int sig_handler_ignored(void __user *handler, int sig)
65770+static int sig_handler_ignored(__sighandler_t handler, int sig)
65771 {
65772 /* Is it explicitly or implicitly ignored? */
65773 return handler == SIG_IGN ||
65774@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
65775 static int sig_task_ignored(struct task_struct *t, int sig,
65776 int from_ancestor_ns)
65777 {
65778- void __user *handler;
65779+ __sighandler_t handler;
65780
65781 handler = sig_handler(t, sig);
65782
65783@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
65784 */
65785 user = get_uid(__task_cred(t)->user);
65786 atomic_inc(&user->sigpending);
65787+
65788+ if (!override_rlimit)
65789+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
65790 if (override_rlimit ||
65791 atomic_read(&user->sigpending) <=
65792 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
65793@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
65794
65795 int unhandled_signal(struct task_struct *tsk, int sig)
65796 {
65797- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
65798+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
65799 if (is_global_init(tsk))
65800 return 1;
65801 if (handler != SIG_IGN && handler != SIG_DFL)
65802@@ -627,6 +630,13 @@ static int check_kill_permission(int sig
65803 }
65804 }
65805
65806+ /* allow glibc communication via tgkill to other threads in our
65807+ thread group */
65808+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
65809+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
65810+ && gr_handle_signal(t, sig))
65811+ return -EPERM;
65812+
65813 return security_task_kill(t, info, sig, 0);
65814 }
65815
65816@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct si
65817 return send_signal(sig, info, p, 1);
65818 }
65819
65820-static int
65821+int
65822 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
65823 {
65824 return send_signal(sig, info, t, 0);
65825@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *
65826 unsigned long int flags;
65827 int ret, blocked, ignored;
65828 struct k_sigaction *action;
65829+ int is_unhandled = 0;
65830
65831 spin_lock_irqsave(&t->sighand->siglock, flags);
65832 action = &t->sighand->action[sig-1];
65833@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *
65834 }
65835 if (action->sa.sa_handler == SIG_DFL)
65836 t->signal->flags &= ~SIGNAL_UNKILLABLE;
65837+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
65838+ is_unhandled = 1;
65839 ret = specific_send_sig_info(sig, info, t);
65840 spin_unlock_irqrestore(&t->sighand->siglock, flags);
65841
65842+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
65843+	   normal operation */
65844+ if (is_unhandled) {
65845+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
65846+ gr_handle_crash(t, sig);
65847+ }
65848+
65849 return ret;
65850 }
65851
65852@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct
65853 {
65854 int ret = check_kill_permission(sig, info, p);
65855
65856- if (!ret && sig)
65857+ if (!ret && sig) {
65858 ret = do_send_sig_info(sig, info, p, true);
65859+ if (!ret)
65860+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
65861+ }
65862
65863 return ret;
65864 }
65865@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
65866 {
65867 siginfo_t info;
65868
65869+ pax_track_stack();
65870+
65871 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
65872
65873 memset(&info, 0, sizeof info);
65874@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid,
65875 int error = -ESRCH;
65876
65877 rcu_read_lock();
65878- p = find_task_by_vpid(pid);
65879+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65880+ /* allow glibc communication via tgkill to other threads in our
65881+ thread group */
65882+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
65883+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
65884+ p = find_task_by_vpid_unrestricted(pid);
65885+ else
65886+#endif
65887+ p = find_task_by_vpid(pid);
65888 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
65889 error = check_kill_permission(sig, info, p);
65890 /*
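The signal.c changes cooperate with the kernel/pid.c helper added earlier: glibc's threading library sends tgkill(tgid, tid, SIGRTMIN+1) with si_code SI_TKILL to its own threads for internal bookkeeping, so that exact combination is exempted both from gr_handle_signal() and, under CONFIG_GRKERNSEC_CHROOT_FINDTASK, from the restricted task lookup. A minimal sketch of the exemption test, with a hypothetical helper name:

	/* true when this is glibc's internal cross-thread notification via tgkill */
	static bool is_glibc_thread_signal(int sig, struct siginfo *info, pid_t tgid)
	{
		return info != SEND_SIG_NOINFO &&
		       info->si_code == SI_TKILL &&
		       sig == SIGRTMIN + 1 &&
		       tgid == info->si_pid;
	}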
65891diff -urNp linux-2.6.32.45/kernel/smp.c linux-2.6.32.45/kernel/smp.c
65892--- linux-2.6.32.45/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
65893+++ linux-2.6.32.45/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
65894@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
65895 }
65896 EXPORT_SYMBOL(smp_call_function);
65897
65898-void ipi_call_lock(void)
65899+void ipi_call_lock(void) __acquires(call_function.lock)
65900 {
65901 spin_lock(&call_function.lock);
65902 }
65903
65904-void ipi_call_unlock(void)
65905+void ipi_call_unlock(void) __releases(call_function.lock)
65906 {
65907 spin_unlock(&call_function.lock);
65908 }
65909
65910-void ipi_call_lock_irq(void)
65911+void ipi_call_lock_irq(void) __acquires(call_function.lock)
65912 {
65913 spin_lock_irq(&call_function.lock);
65914 }
65915
65916-void ipi_call_unlock_irq(void)
65917+void ipi_call_unlock_irq(void) __releases(call_function.lock)
65918 {
65919 spin_unlock_irq(&call_function.lock);
65920 }
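The ipi_call_lock*() changes add sparse annotations only: __acquires() and __releases() tell the static checker that these helpers change the lock context, silencing context-imbalance warnings without affecting generated code. A minimal sketch of the idiom:

	static DEFINE_SPINLOCK(demo_lock);

	void demo_lock_acquire(void) __acquires(demo_lock)
	{
		spin_lock(&demo_lock);	/* sparse knows we return with the lock held */
	}

	void demo_lock_release(void) __releases(demo_lock)
	{
		spin_unlock(&demo_lock);
	}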
65921diff -urNp linux-2.6.32.45/kernel/softirq.c linux-2.6.32.45/kernel/softirq.c
65922--- linux-2.6.32.45/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
65923+++ linux-2.6.32.45/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
65924@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
65925
65926 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
65927
65928-char *softirq_to_name[NR_SOFTIRQS] = {
65929+const char * const softirq_to_name[NR_SOFTIRQS] = {
65930 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
65931 "TASKLET", "SCHED", "HRTIMER", "RCU"
65932 };
65933@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
65934
65935 asmlinkage void __do_softirq(void)
65936 {
65937- struct softirq_action *h;
65938+ const struct softirq_action *h;
65939 __u32 pending;
65940 int max_restart = MAX_SOFTIRQ_RESTART;
65941 int cpu;
65942@@ -233,7 +233,7 @@ restart:
65943 kstat_incr_softirqs_this_cpu(h - softirq_vec);
65944
65945 trace_softirq_entry(h, softirq_vec);
65946- h->action(h);
65947+ h->action();
65948 trace_softirq_exit(h, softirq_vec);
65949 if (unlikely(prev_count != preempt_count())) {
65950 printk(KERN_ERR "huh, entered softirq %td %s %p"
65951@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
65952 local_irq_restore(flags);
65953 }
65954
65955-void open_softirq(int nr, void (*action)(struct softirq_action *))
65956+void open_softirq(int nr, void (*action)(void))
65957 {
65958- softirq_vec[nr].action = action;
65959+ pax_open_kernel();
65960+ *(void **)&softirq_vec[nr].action = action;
65961+ pax_close_kernel();
65962 }
65963
65964 /*
65965@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
65966
65967 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
65968
65969-static void tasklet_action(struct softirq_action *a)
65970+static void tasklet_action(void)
65971 {
65972 struct tasklet_struct *list;
65973
65974@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
65975 }
65976 }
65977
65978-static void tasklet_hi_action(struct softirq_action *a)
65979+static void tasklet_hi_action(void)
65980 {
65981 struct tasklet_struct *list;
65982
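Softirq handlers never used their struct softirq_action * argument, so the patch drops it across the tree; and because softirq_vec itself is made read-only after init elsewhere in this patch, open_softirq() has to reopen the kernel mapping around the single store. A minimal sketch, assuming the pax_open_kernel()/pax_close_kernel() helpers this patch provides:

	void open_softirq(int nr, void (*action)(void))
	{
		pax_open_kernel();	/* temporarily allow writing the read-only table */
		*(void **)&softirq_vec[nr].action = action;
		pax_close_kernel();
	}

	/* handlers now take no argument at all */
	static void tasklet_action(void)
	{
		/* ... run the per-cpu tasklet list ... */
	}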
65983diff -urNp linux-2.6.32.45/kernel/sys.c linux-2.6.32.45/kernel/sys.c
65984--- linux-2.6.32.45/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
65985+++ linux-2.6.32.45/kernel/sys.c 2011-08-11 19:51:54.000000000 -0400
65986@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
65987 error = -EACCES;
65988 goto out;
65989 }
65990+
65991+ if (gr_handle_chroot_setpriority(p, niceval)) {
65992+ error = -EACCES;
65993+ goto out;
65994+ }
65995+
65996 no_nice = security_task_setnice(p, niceval);
65997 if (no_nice) {
65998 error = no_nice;
65999@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
66000 !(user = find_user(who)))
66001 goto out_unlock; /* No processes for this user */
66002
66003- do_each_thread(g, p)
66004+ do_each_thread(g, p) {
66005 if (__task_cred(p)->uid == who)
66006 error = set_one_prio(p, niceval, error);
66007- while_each_thread(g, p);
66008+ } while_each_thread(g, p);
66009 if (who != cred->uid)
66010 free_uid(user); /* For find_user() */
66011 break;
66012@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
66013 !(user = find_user(who)))
66014 goto out_unlock; /* No processes for this user */
66015
66016- do_each_thread(g, p)
66017+ do_each_thread(g, p) {
66018 if (__task_cred(p)->uid == who) {
66019 niceval = 20 - task_nice(p);
66020 if (niceval > retval)
66021 retval = niceval;
66022 }
66023- while_each_thread(g, p);
66024+ } while_each_thread(g, p);
66025 if (who != cred->uid)
66026 free_uid(user); /* for find_user() */
66027 break;
66028@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
66029 goto error;
66030 }
66031
66032+ if (gr_check_group_change(new->gid, new->egid, -1))
66033+ goto error;
66034+
66035 if (rgid != (gid_t) -1 ||
66036 (egid != (gid_t) -1 && egid != old->gid))
66037 new->sgid = new->egid;
66038@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66039 goto error;
66040
66041 retval = -EPERM;
66042+
66043+ if (gr_check_group_change(gid, gid, gid))
66044+ goto error;
66045+
66046 if (capable(CAP_SETGID))
66047 new->gid = new->egid = new->sgid = new->fsgid = gid;
66048 else if (gid == old->gid || gid == old->sgid)
66049@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
66050 if (!new_user)
66051 return -EAGAIN;
66052
66053+ /*
66054+ * We don't fail in case of NPROC limit excess here because too many
66055+ * poorly written programs don't check set*uid() return code, assuming
66056+ * it never fails if called by root. We may still enforce NPROC limit
66057+ * for programs doing set*uid()+execve() by harmlessly deferring the
66058+ * failure to the execve() stage.
66059+ */
66060 if (atomic_read(&new_user->processes) >=
66061 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
66062- new_user != INIT_USER) {
66063- free_uid(new_user);
66064- return -EAGAIN;
66065- }
66066+ new_user != INIT_USER)
66067+ current->flags |= PF_NPROC_EXCEEDED;
66068+ else
66069+ current->flags &= ~PF_NPROC_EXCEEDED;
66070
66071 free_uid(new->user);
66072 new->user = new_user;
66073@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
66074 goto error;
66075 }
66076
66077+ if (gr_check_user_change(new->uid, new->euid, -1))
66078+ goto error;
66079+
66080 if (new->uid != old->uid) {
66081 retval = set_user(new);
66082 if (retval < 0)
66083@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66084 goto error;
66085
66086 retval = -EPERM;
66087+
66088+ if (gr_check_crash_uid(uid))
66089+ goto error;
66090+ if (gr_check_user_change(uid, uid, uid))
66091+ goto error;
66092+
66093 if (capable(CAP_SETUID)) {
66094 new->suid = new->uid = uid;
66095 if (uid != old->uid) {
66096@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
66097 goto error;
66098 }
66099
66100+ if (gr_check_user_change(ruid, euid, -1))
66101+ goto error;
66102+
66103 if (ruid != (uid_t) -1) {
66104 new->uid = ruid;
66105 if (ruid != old->uid) {
66106@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
66107 goto error;
66108 }
66109
66110+ if (gr_check_group_change(rgid, egid, -1))
66111+ goto error;
66112+
66113 if (rgid != (gid_t) -1)
66114 new->gid = rgid;
66115 if (egid != (gid_t) -1)
66116@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66117 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
66118 goto error;
66119
66120+ if (gr_check_user_change(-1, -1, uid))
66121+ goto error;
66122+
66123 if (uid == old->uid || uid == old->euid ||
66124 uid == old->suid || uid == old->fsuid ||
66125 capable(CAP_SETUID)) {
66126@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66127 if (gid == old->gid || gid == old->egid ||
66128 gid == old->sgid || gid == old->fsgid ||
66129 capable(CAP_SETGID)) {
66130+ if (gr_check_group_change(-1, -1, gid))
66131+ goto error;
66132+
66133 if (gid != old_fsgid) {
66134 new->fsgid = gid;
66135 goto change_okay;
66136@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66137 error = get_dumpable(me->mm);
66138 break;
66139 case PR_SET_DUMPABLE:
66140- if (arg2 < 0 || arg2 > 1) {
66141+ if (arg2 > 1) {
66142 error = -EINVAL;
66143 break;
66144 }
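set_user() no longer fails immediately when the new user would exceed RLIMIT_NPROC; it records PF_NPROC_EXCEEDED on the task instead, so a set*uid() call that would otherwise fail still drops privileges, and the limit is enforced harmlessly at the subsequent execve(). A sketch of the execve-side check, assuming the PF_NPROC_EXCEEDED flag definition that accompanies this change and 2.6.32 field names:

	/* in the execve path: enforce the limit that set_user() deferred */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) >
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		retval = -EAGAIN;
		goto out;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;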
66145diff -urNp linux-2.6.32.45/kernel/sysctl.c linux-2.6.32.45/kernel/sysctl.c
66146--- linux-2.6.32.45/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
66147+++ linux-2.6.32.45/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
66148@@ -63,6 +63,13 @@
66149 static int deprecated_sysctl_warning(struct __sysctl_args *args);
66150
66151 #if defined(CONFIG_SYSCTL)
66152+#include <linux/grsecurity.h>
66153+#include <linux/grinternal.h>
66154+
66155+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66156+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66157+ const int op);
66158+extern int gr_handle_chroot_sysctl(const int op);
66159
66160 /* External variables not in a header file. */
66161 extern int C_A_D;
66162@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
66163 static int proc_taint(struct ctl_table *table, int write,
66164 void __user *buffer, size_t *lenp, loff_t *ppos);
66165 #endif
66166+extern ctl_table grsecurity_table[];
66167
66168 static struct ctl_table root_table[];
66169 static struct ctl_table_root sysctl_table_root;
66170@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
66171 int sysctl_legacy_va_layout;
66172 #endif
66173
66174+#ifdef CONFIG_PAX_SOFTMODE
66175+static ctl_table pax_table[] = {
66176+ {
66177+ .ctl_name = CTL_UNNUMBERED,
66178+ .procname = "softmode",
66179+ .data = &pax_softmode,
66180+ .maxlen = sizeof(unsigned int),
66181+ .mode = 0600,
66182+ .proc_handler = &proc_dointvec,
66183+ },
66184+
66185+ { .ctl_name = 0 }
66186+};
66187+#endif
66188+
66189 extern int prove_locking;
66190 extern int lock_stat;
66191
66192@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
66193 #endif
66194
66195 static struct ctl_table kern_table[] = {
66196+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66197+ {
66198+ .ctl_name = CTL_UNNUMBERED,
66199+ .procname = "grsecurity",
66200+ .mode = 0500,
66201+ .child = grsecurity_table,
66202+ },
66203+#endif
66204+
66205+#ifdef CONFIG_PAX_SOFTMODE
66206+ {
66207+ .ctl_name = CTL_UNNUMBERED,
66208+ .procname = "pax",
66209+ .mode = 0500,
66210+ .child = pax_table,
66211+ },
66212+#endif
66213+
66214 {
66215 .ctl_name = CTL_UNNUMBERED,
66216 .procname = "sched_child_runs_first",
66217@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
66218 .data = &modprobe_path,
66219 .maxlen = KMOD_PATH_LEN,
66220 .mode = 0644,
66221- .proc_handler = &proc_dostring,
66222- .strategy = &sysctl_string,
66223+ .proc_handler = &proc_dostring_modpriv,
66224+ .strategy = &sysctl_string_modpriv,
66225 },
66226 {
66227 .ctl_name = CTL_UNNUMBERED,
66228@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
66229 .mode = 0644,
66230 .proc_handler = &proc_dointvec
66231 },
66232+ {
66233+ .procname = "heap_stack_gap",
66234+ .data = &sysctl_heap_stack_gap,
66235+ .maxlen = sizeof(sysctl_heap_stack_gap),
66236+ .mode = 0644,
66237+ .proc_handler = proc_doulongvec_minmax,
66238+ },
66239 #else
66240 {
66241 .ctl_name = CTL_UNNUMBERED,
66242@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
66243 return 0;
66244 }
66245
66246+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
66247+
66248 static int parse_table(int __user *name, int nlen,
66249 void __user *oldval, size_t __user *oldlenp,
66250 void __user *newval, size_t newlen,
66251@@ -1821,7 +1871,7 @@ repeat:
66252 if (n == table->ctl_name) {
66253 int error;
66254 if (table->child) {
66255- if (sysctl_perm(root, table, MAY_EXEC))
66256+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
66257 return -EPERM;
66258 name++;
66259 nlen--;
66260@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
66261 int error;
66262 int mode;
66263
66264+ if (table->parent != NULL && table->parent->procname != NULL &&
66265+ table->procname != NULL &&
66266+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66267+ return -EACCES;
66268+ if (gr_handle_chroot_sysctl(op))
66269+ return -EACCES;
66270+ error = gr_handle_sysctl(table, op);
66271+ if (error)
66272+ return error;
66273+
66274+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66275+ if (error)
66276+ return error;
66277+
66278+ if (root->permissions)
66279+ mode = root->permissions(root, current->nsproxy, table);
66280+ else
66281+ mode = table->mode;
66282+
66283+ return test_perm(mode, op);
66284+}
66285+
66286+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66287+{
66288+ int error;
66289+ int mode;
66290+
66291 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66292 if (error)
66293 return error;
66294@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66295 buffer, lenp, ppos);
66296 }
66297
66298+int proc_dostring_modpriv(struct ctl_table *table, int write,
66299+ void __user *buffer, size_t *lenp, loff_t *ppos)
66300+{
66301+ if (write && !capable(CAP_SYS_MODULE))
66302+ return -EPERM;
66303+
66304+ return _proc_do_string(table->data, table->maxlen, write,
66305+ buffer, lenp, ppos);
66306+}
66307+
66308
66309 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66310 int *valp,
66311@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66312 vleft = table->maxlen / sizeof(unsigned long);
66313 left = *lenp;
66314
66315- for (; left && vleft--; i++, min++, max++, first=0) {
66316+ for (; left && vleft--; i++, first=0) {
66317 if (write) {
66318 while (left) {
66319 char c;
66320@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66321 return -ENOSYS;
66322 }
66323
66324+int proc_dostring_modpriv(struct ctl_table *table, int write,
66325+ void __user *buffer, size_t *lenp, loff_t *ppos)
66326+{
66327+ return -ENOSYS;
66328+}
66329+
66330 int proc_dointvec(struct ctl_table *table, int write,
66331 void __user *buffer, size_t *lenp, loff_t *ppos)
66332 {
66333@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66334 return 1;
66335 }
66336
66337+int sysctl_string_modpriv(struct ctl_table *table,
66338+ void __user *oldval, size_t __user *oldlenp,
66339+ void __user *newval, size_t newlen)
66340+{
66341+ if (newval && newlen && !capable(CAP_SYS_MODULE))
66342+ return -EPERM;
66343+
66344+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
66345+}
66346+
66347 /*
66348 * This function makes sure that all of the integers in the vector
66349 * are between the minimum and maximum values given in the arrays
66350@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66351 return -ENOSYS;
66352 }
66353
66354+int sysctl_string_modpriv(struct ctl_table *table,
66355+ void __user *oldval, size_t __user *oldlenp,
66356+ void __user *newval, size_t newlen)
66357+{
66358+ return -ENOSYS;
66359+}
66360+
66361 int sysctl_intvec(struct ctl_table *table,
66362 void __user *oldval, size_t __user *oldlenp,
66363 void __user *newval, size_t newlen)
66364@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66365 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66366 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66367 EXPORT_SYMBOL(proc_dostring);
66368+EXPORT_SYMBOL(proc_dostring_modpriv);
66369 EXPORT_SYMBOL(proc_doulongvec_minmax);
66370 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66371 EXPORT_SYMBOL(register_sysctl_table);
66372@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66373 EXPORT_SYMBOL(sysctl_jiffies);
66374 EXPORT_SYMBOL(sysctl_ms_jiffies);
66375 EXPORT_SYMBOL(sysctl_string);
66376+EXPORT_SYMBOL(sysctl_string_modpriv);
66377 EXPORT_SYMBOL(sysctl_data);
66378 EXPORT_SYMBOL(unregister_sysctl_table);
66379diff -urNp linux-2.6.32.45/kernel/sysctl_check.c linux-2.6.32.45/kernel/sysctl_check.c
66380--- linux-2.6.32.45/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66381+++ linux-2.6.32.45/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66382@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66383 } else {
66384 if ((table->strategy == sysctl_data) ||
66385 (table->strategy == sysctl_string) ||
66386+ (table->strategy == sysctl_string_modpriv) ||
66387 (table->strategy == sysctl_intvec) ||
66388 (table->strategy == sysctl_jiffies) ||
66389 (table->strategy == sysctl_ms_jiffies) ||
66390 (table->proc_handler == proc_dostring) ||
66391+ (table->proc_handler == proc_dostring_modpriv) ||
66392 (table->proc_handler == proc_dointvec) ||
66393 (table->proc_handler == proc_dointvec_minmax) ||
66394 (table->proc_handler == proc_dointvec_jiffies) ||
66395diff -urNp linux-2.6.32.45/kernel/taskstats.c linux-2.6.32.45/kernel/taskstats.c
66396--- linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66397+++ linux-2.6.32.45/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66398@@ -26,9 +26,12 @@
66399 #include <linux/cgroup.h>
66400 #include <linux/fs.h>
66401 #include <linux/file.h>
66402+#include <linux/grsecurity.h>
66403 #include <net/genetlink.h>
66404 #include <asm/atomic.h>
66405
66406+extern int gr_is_taskstats_denied(int pid);
66407+
66408 /*
66409 * Maximum length of a cpumask that can be specified in
66410 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66411@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66412 size_t size;
66413 cpumask_var_t mask;
66414
66415+ if (gr_is_taskstats_denied(current->pid))
66416+ return -EACCES;
66417+
66418 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66419 return -ENOMEM;
66420
66421diff -urNp linux-2.6.32.45/kernel/time/tick-broadcast.c linux-2.6.32.45/kernel/time/tick-broadcast.c
66422--- linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66423+++ linux-2.6.32.45/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66424@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66425 * then clear the broadcast bit.
66426 */
66427 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66428- int cpu = smp_processor_id();
66429+ cpu = smp_processor_id();
66430
66431 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66432 tick_broadcast_clear_oneshot(cpu);
66433diff -urNp linux-2.6.32.45/kernel/time/timekeeping.c linux-2.6.32.45/kernel/time/timekeeping.c
66434--- linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66435+++ linux-2.6.32.45/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66436@@ -14,6 +14,7 @@
66437 #include <linux/init.h>
66438 #include <linux/mm.h>
66439 #include <linux/sched.h>
66440+#include <linux/grsecurity.h>
66441 #include <linux/sysdev.h>
66442 #include <linux/clocksource.h>
66443 #include <linux/jiffies.h>
66444@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66445 */
66446 struct timespec ts = xtime;
66447 timespec_add_ns(&ts, nsec);
66448- ACCESS_ONCE(xtime_cache) = ts;
66449+ ACCESS_ONCE_RW(xtime_cache) = ts;
66450 }
66451
66452 /* must hold xtime_lock */
66453@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66454 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66455 return -EINVAL;
66456
66457+ gr_log_timechange();
66458+
66459 write_seqlock_irqsave(&xtime_lock, flags);
66460
66461 timekeeping_forward_now();
66462diff -urNp linux-2.6.32.45/kernel/time/timer_list.c linux-2.6.32.45/kernel/time/timer_list.c
66463--- linux-2.6.32.45/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66464+++ linux-2.6.32.45/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66465@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66466
66467 static void print_name_offset(struct seq_file *m, void *sym)
66468 {
66469+#ifdef CONFIG_GRKERNSEC_HIDESYM
66470+ SEQ_printf(m, "<%p>", NULL);
66471+#else
66472 char symname[KSYM_NAME_LEN];
66473
66474 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66475 SEQ_printf(m, "<%p>", sym);
66476 else
66477 SEQ_printf(m, "%s", symname);
66478+#endif
66479 }
66480
66481 static void
66482@@ -112,7 +116,11 @@ next_one:
66483 static void
66484 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66485 {
66486+#ifdef CONFIG_GRKERNSEC_HIDESYM
66487+ SEQ_printf(m, " .base: %p\n", NULL);
66488+#else
66489 SEQ_printf(m, " .base: %p\n", base);
66490+#endif
66491 SEQ_printf(m, " .index: %d\n",
66492 base->index);
66493 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66494@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66495 {
66496 struct proc_dir_entry *pe;
66497
66498+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66499+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66500+#else
66501 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66502+#endif
66503 if (!pe)
66504 return -ENOMEM;
66505 return 0;
66506diff -urNp linux-2.6.32.45/kernel/time/timer_stats.c linux-2.6.32.45/kernel/time/timer_stats.c
66507--- linux-2.6.32.45/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66508+++ linux-2.6.32.45/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66509@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66510 static unsigned long nr_entries;
66511 static struct entry entries[MAX_ENTRIES];
66512
66513-static atomic_t overflow_count;
66514+static atomic_unchecked_t overflow_count;
66515
66516 /*
66517 * The entries are in a hash-table, for fast lookup:
66518@@ -140,7 +140,7 @@ static void reset_entries(void)
66519 nr_entries = 0;
66520 memset(entries, 0, sizeof(entries));
66521 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66522- atomic_set(&overflow_count, 0);
66523+ atomic_set_unchecked(&overflow_count, 0);
66524 }
66525
66526 static struct entry *alloc_entry(void)
66527@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66528 if (likely(entry))
66529 entry->count++;
66530 else
66531- atomic_inc(&overflow_count);
66532+ atomic_inc_unchecked(&overflow_count);
66533
66534 out_unlock:
66535 spin_unlock_irqrestore(lock, flags);
66536@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66537
66538 static void print_name_offset(struct seq_file *m, unsigned long addr)
66539 {
66540+#ifdef CONFIG_GRKERNSEC_HIDESYM
66541+ seq_printf(m, "<%p>", NULL);
66542+#else
66543 char symname[KSYM_NAME_LEN];
66544
66545 if (lookup_symbol_name(addr, symname) < 0)
66546 seq_printf(m, "<%p>", (void *)addr);
66547 else
66548 seq_printf(m, "%s", symname);
66549+#endif
66550 }
66551
66552 static int tstats_show(struct seq_file *m, void *v)
66553@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66554
66555 seq_puts(m, "Timer Stats Version: v0.2\n");
66556 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66557- if (atomic_read(&overflow_count))
66558+ if (atomic_read_unchecked(&overflow_count))
66559 seq_printf(m, "Overflow: %d entries\n",
66560- atomic_read(&overflow_count));
66561+ atomic_read_unchecked(&overflow_count));
66562
66563 for (i = 0; i < nr_entries; i++) {
66564 entry = entries + i;
66565@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66566 {
66567 struct proc_dir_entry *pe;
66568
66569+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66570+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66571+#else
66572 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66573+#endif
66574 if (!pe)
66575 return -ENOMEM;
66576 return 0;
66577diff -urNp linux-2.6.32.45/kernel/time.c linux-2.6.32.45/kernel/time.c
66578--- linux-2.6.32.45/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66579+++ linux-2.6.32.45/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66580@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66581 return error;
66582
66583 if (tz) {
66584+ /* we log in do_settimeofday called below, so don't log twice
66585+ */
66586+ if (!tv)
66587+ gr_log_timechange();
66588+
66589 /* SMP safe, global irq locking makes it work. */
66590 sys_tz = *tz;
66591 update_vsyscall_tz();
66592@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66593 * Avoid unnecessary multiplications/divisions in the
66594 * two most common HZ cases:
66595 */
66596-unsigned int inline jiffies_to_msecs(const unsigned long j)
66597+inline unsigned int jiffies_to_msecs(const unsigned long j)
66598 {
66599 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66600 return (MSEC_PER_SEC / HZ) * j;
66601@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66602 }
66603 EXPORT_SYMBOL(jiffies_to_msecs);
66604
66605-unsigned int inline jiffies_to_usecs(const unsigned long j)
66606+inline unsigned int jiffies_to_usecs(const unsigned long j)
66607 {
66608 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66609 return (USEC_PER_SEC / HZ) * j;
66610diff -urNp linux-2.6.32.45/kernel/timer.c linux-2.6.32.45/kernel/timer.c
66611--- linux-2.6.32.45/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66612+++ linux-2.6.32.45/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66613@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66614 /*
66615 * This function runs timers and the timer-tq in bottom half context.
66616 */
66617-static void run_timer_softirq(struct softirq_action *h)
66618+static void run_timer_softirq(void)
66619 {
66620 struct tvec_base *base = __get_cpu_var(tvec_bases);
66621
66622diff -urNp linux-2.6.32.45/kernel/trace/blktrace.c linux-2.6.32.45/kernel/trace/blktrace.c
66623--- linux-2.6.32.45/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66624+++ linux-2.6.32.45/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66625@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66626 struct blk_trace *bt = filp->private_data;
66627 char buf[16];
66628
66629- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66630+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66631
66632 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66633 }
66634@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66635 return 1;
66636
66637 bt = buf->chan->private_data;
66638- atomic_inc(&bt->dropped);
66639+ atomic_inc_unchecked(&bt->dropped);
66640 return 0;
66641 }
66642
66643@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66644
66645 bt->dir = dir;
66646 bt->dev = dev;
66647- atomic_set(&bt->dropped, 0);
66648+ atomic_set_unchecked(&bt->dropped, 0);
66649
66650 ret = -EIO;
66651 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66652diff -urNp linux-2.6.32.45/kernel/trace/ftrace.c linux-2.6.32.45/kernel/trace/ftrace.c
66653--- linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66654+++ linux-2.6.32.45/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66655@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66656
66657 ip = rec->ip;
66658
66659+ ret = ftrace_arch_code_modify_prepare();
66660+ FTRACE_WARN_ON(ret);
66661+ if (ret)
66662+ return 0;
66663+
66664 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66665+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66666 if (ret) {
66667 ftrace_bug(ret, ip);
66668 rec->flags |= FTRACE_FL_FAILED;
66669- return 0;
66670 }
66671- return 1;
66672+ return ret ? 0 : 1;
66673 }
66674
66675 /*
66676diff -urNp linux-2.6.32.45/kernel/trace/ring_buffer.c linux-2.6.32.45/kernel/trace/ring_buffer.c
66677--- linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66678+++ linux-2.6.32.45/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66679@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66680 * the reader page). But if the next page is a header page,
66681 * its flags will be non zero.
66682 */
66683-static int inline
66684+static inline int
66685 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66686 struct buffer_page *page, struct list_head *list)
66687 {
66688diff -urNp linux-2.6.32.45/kernel/trace/trace.c linux-2.6.32.45/kernel/trace/trace.c
66689--- linux-2.6.32.45/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66690+++ linux-2.6.32.45/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66691@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66692 size_t rem;
66693 unsigned int i;
66694
66695+ pax_track_stack();
66696+
66697 /* copy the tracer to avoid using a global lock all around */
66698 mutex_lock(&trace_types_lock);
66699 if (unlikely(old_tracer != current_trace && current_trace)) {
66700@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66701 int entries, size, i;
66702 size_t ret;
66703
66704+ pax_track_stack();
66705+
66706 if (*ppos & (PAGE_SIZE - 1)) {
66707 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66708 return -EINVAL;
66709@@ -3816,10 +3820,9 @@ static const struct file_operations trac
66710 };
66711 #endif
66712
66713-static struct dentry *d_tracer;
66714-
66715 struct dentry *tracing_init_dentry(void)
66716 {
66717+ static struct dentry *d_tracer;
66718 static int once;
66719
66720 if (d_tracer)
66721@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66722 return d_tracer;
66723 }
66724
66725-static struct dentry *d_percpu;
66726-
66727 struct dentry *tracing_dentry_percpu(void)
66728 {
66729+ static struct dentry *d_percpu;
66730 static int once;
66731 struct dentry *d_tracer;
66732
66733diff -urNp linux-2.6.32.45/kernel/trace/trace_events.c linux-2.6.32.45/kernel/trace/trace_events.c
66734--- linux-2.6.32.45/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66735+++ linux-2.6.32.45/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66736@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66737 * Modules must own their file_operations to keep up with
66738 * reference counting.
66739 */
66740+
66741 struct ftrace_module_file_ops {
66742 struct list_head list;
66743 struct module *mod;
66744- struct file_operations id;
66745- struct file_operations enable;
66746- struct file_operations format;
66747- struct file_operations filter;
66748 };
66749
66750 static void remove_subsystem_dir(const char *name)
66751@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66752
66753 file_ops->mod = mod;
66754
66755- file_ops->id = ftrace_event_id_fops;
66756- file_ops->id.owner = mod;
66757-
66758- file_ops->enable = ftrace_enable_fops;
66759- file_ops->enable.owner = mod;
66760-
66761- file_ops->filter = ftrace_event_filter_fops;
66762- file_ops->filter.owner = mod;
66763-
66764- file_ops->format = ftrace_event_format_fops;
66765- file_ops->format.owner = mod;
66766+ pax_open_kernel();
66767+ *(void **)&mod->trace_id.owner = mod;
66768+ *(void **)&mod->trace_enable.owner = mod;
66769+ *(void **)&mod->trace_filter.owner = mod;
66770+ *(void **)&mod->trace_format.owner = mod;
66771+ pax_close_kernel();
66772
66773 list_add(&file_ops->list, &ftrace_module_file_list);
66774
66775@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
66776 call->mod = mod;
66777 list_add(&call->list, &ftrace_events);
66778 event_create_dir(call, d_events,
66779- &file_ops->id, &file_ops->enable,
66780- &file_ops->filter, &file_ops->format);
66781+ &mod->trace_id, &mod->trace_enable,
66782+ &mod->trace_filter, &mod->trace_format);
66783 }
66784 }
66785
66786diff -urNp linux-2.6.32.45/kernel/trace/trace_mmiotrace.c linux-2.6.32.45/kernel/trace/trace_mmiotrace.c
66787--- linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
66788+++ linux-2.6.32.45/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
66789@@ -23,7 +23,7 @@ struct header_iter {
66790 static struct trace_array *mmio_trace_array;
66791 static bool overrun_detected;
66792 static unsigned long prev_overruns;
66793-static atomic_t dropped_count;
66794+static atomic_unchecked_t dropped_count;
66795
66796 static void mmio_reset_data(struct trace_array *tr)
66797 {
66798@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
66799
66800 static unsigned long count_overruns(struct trace_iterator *iter)
66801 {
66802- unsigned long cnt = atomic_xchg(&dropped_count, 0);
66803+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
66804 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
66805
66806 if (over > prev_overruns)
66807@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
66808 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
66809 sizeof(*entry), 0, pc);
66810 if (!event) {
66811- atomic_inc(&dropped_count);
66812+ atomic_inc_unchecked(&dropped_count);
66813 return;
66814 }
66815 entry = ring_buffer_event_data(event);
66816@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
66817 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
66818 sizeof(*entry), 0, pc);
66819 if (!event) {
66820- atomic_inc(&dropped_count);
66821+ atomic_inc_unchecked(&dropped_count);
66822 return;
66823 }
66824 entry = ring_buffer_event_data(event);
66825diff -urNp linux-2.6.32.45/kernel/trace/trace_output.c linux-2.6.32.45/kernel/trace/trace_output.c
66826--- linux-2.6.32.45/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
66827+++ linux-2.6.32.45/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
66828@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
66829 return 0;
66830 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
66831 if (!IS_ERR(p)) {
66832- p = mangle_path(s->buffer + s->len, p, "\n");
66833+ p = mangle_path(s->buffer + s->len, p, "\n\\");
66834 if (p) {
66835 s->len = p - s->buffer;
66836 return 1;
66837diff -urNp linux-2.6.32.45/kernel/trace/trace_stack.c linux-2.6.32.45/kernel/trace/trace_stack.c
66838--- linux-2.6.32.45/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
66839+++ linux-2.6.32.45/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
66840@@ -50,7 +50,7 @@ static inline void check_stack(void)
66841 return;
66842
66843 /* we do not handle interrupt stacks yet */
66844- if (!object_is_on_stack(&this_size))
66845+ if (!object_starts_on_stack(&this_size))
66846 return;
66847
66848 local_irq_save(flags);
66849diff -urNp linux-2.6.32.45/kernel/trace/trace_workqueue.c linux-2.6.32.45/kernel/trace/trace_workqueue.c
66850--- linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
66851+++ linux-2.6.32.45/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
66852@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
66853 int cpu;
66854 pid_t pid;
66855 /* Can be inserted from interrupt or user context, need to be atomic */
66856- atomic_t inserted;
66857+ atomic_unchecked_t inserted;
66858 /*
66859 * Don't need to be atomic, works are serialized in a single workqueue thread
66860 * on a single CPU.
66861@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
66862 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
66863 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
66864 if (node->pid == wq_thread->pid) {
66865- atomic_inc(&node->inserted);
66866+ atomic_inc_unchecked(&node->inserted);
66867 goto found;
66868 }
66869 }
66870@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
66871 tsk = get_pid_task(pid, PIDTYPE_PID);
66872 if (tsk) {
66873 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
66874- atomic_read(&cws->inserted), cws->executed,
66875+ atomic_read_unchecked(&cws->inserted), cws->executed,
66876 tsk->comm);
66877 put_task_struct(tsk);
66878 }
66879diff -urNp linux-2.6.32.45/kernel/user.c linux-2.6.32.45/kernel/user.c
66880--- linux-2.6.32.45/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
66881+++ linux-2.6.32.45/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
66882@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
66883 spin_lock_irq(&uidhash_lock);
66884 up = uid_hash_find(uid, hashent);
66885 if (up) {
66886+ put_user_ns(ns);
66887 key_put(new->uid_keyring);
66888 key_put(new->session_keyring);
66889 kmem_cache_free(uid_cachep, new);
66890diff -urNp linux-2.6.32.45/lib/bug.c linux-2.6.32.45/lib/bug.c
66891--- linux-2.6.32.45/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
66892+++ linux-2.6.32.45/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
66893@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
66894 return BUG_TRAP_TYPE_NONE;
66895
66896 bug = find_bug(bugaddr);
66897+ if (!bug)
66898+ return BUG_TRAP_TYPE_NONE;
66899
66900 printk(KERN_EMERG "------------[ cut here ]------------\n");
66901
66902diff -urNp linux-2.6.32.45/lib/debugobjects.c linux-2.6.32.45/lib/debugobjects.c
66903--- linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
66904+++ linux-2.6.32.45/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
66905@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
66906 if (limit > 4)
66907 return;
66908
66909- is_on_stack = object_is_on_stack(addr);
66910+ is_on_stack = object_starts_on_stack(addr);
66911 if (is_on_stack == onstack)
66912 return;
66913
66914diff -urNp linux-2.6.32.45/lib/dma-debug.c linux-2.6.32.45/lib/dma-debug.c
66915--- linux-2.6.32.45/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
66916+++ linux-2.6.32.45/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
66917@@ -861,7 +861,7 @@ out:
66918
66919 static void check_for_stack(struct device *dev, void *addr)
66920 {
66921- if (object_is_on_stack(addr))
66922+ if (object_starts_on_stack(addr))
66923 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
66924 "stack [addr=%p]\n", addr);
66925 }
66926diff -urNp linux-2.6.32.45/lib/idr.c linux-2.6.32.45/lib/idr.c
66927--- linux-2.6.32.45/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
66928+++ linux-2.6.32.45/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
66929@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
66930 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
66931
66932 /* if already at the top layer, we need to grow */
66933- if (id >= 1 << (idp->layers * IDR_BITS)) {
66934+ if (id >= (1 << (idp->layers * IDR_BITS))) {
66935 *starting_id = id;
66936 return IDR_NEED_TO_GROW;
66937 }
66938diff -urNp linux-2.6.32.45/lib/inflate.c linux-2.6.32.45/lib/inflate.c
66939--- linux-2.6.32.45/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
66940+++ linux-2.6.32.45/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
66941@@ -266,7 +266,7 @@ static void free(void *where)
66942 malloc_ptr = free_mem_ptr;
66943 }
66944 #else
66945-#define malloc(a) kmalloc(a, GFP_KERNEL)
66946+#define malloc(a) kmalloc((a), GFP_KERNEL)
66947 #define free(a) kfree(a)
66948 #endif
66949
66950diff -urNp linux-2.6.32.45/lib/Kconfig.debug linux-2.6.32.45/lib/Kconfig.debug
66951--- linux-2.6.32.45/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
66952+++ linux-2.6.32.45/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
66953@@ -905,7 +905,7 @@ config LATENCYTOP
66954 select STACKTRACE
66955 select SCHEDSTATS
66956 select SCHED_DEBUG
66957- depends on HAVE_LATENCYTOP_SUPPORT
66958+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
66959 help
66960 Enable this option if you want to use the LatencyTOP tool
66961 to find out which userspace is blocking on what kernel operations.
66962diff -urNp linux-2.6.32.45/lib/kobject.c linux-2.6.32.45/lib/kobject.c
66963--- linux-2.6.32.45/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
66964+++ linux-2.6.32.45/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
66965@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
66966 return ret;
66967 }
66968
66969-struct sysfs_ops kobj_sysfs_ops = {
66970+const struct sysfs_ops kobj_sysfs_ops = {
66971 .show = kobj_attr_show,
66972 .store = kobj_attr_store,
66973 };
66974@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
66975 * If the kset was not able to be created, NULL will be returned.
66976 */
66977 static struct kset *kset_create(const char *name,
66978- struct kset_uevent_ops *uevent_ops,
66979+ const struct kset_uevent_ops *uevent_ops,
66980 struct kobject *parent_kobj)
66981 {
66982 struct kset *kset;
66983@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
66984 * If the kset was not able to be created, NULL will be returned.
66985 */
66986 struct kset *kset_create_and_add(const char *name,
66987- struct kset_uevent_ops *uevent_ops,
66988+ const struct kset_uevent_ops *uevent_ops,
66989 struct kobject *parent_kobj)
66990 {
66991 struct kset *kset;
66992diff -urNp linux-2.6.32.45/lib/kobject_uevent.c linux-2.6.32.45/lib/kobject_uevent.c
66993--- linux-2.6.32.45/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
66994+++ linux-2.6.32.45/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
66995@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
66996 const char *subsystem;
66997 struct kobject *top_kobj;
66998 struct kset *kset;
66999- struct kset_uevent_ops *uevent_ops;
67000+ const struct kset_uevent_ops *uevent_ops;
67001 u64 seq;
67002 int i = 0;
67003 int retval = 0;
67004diff -urNp linux-2.6.32.45/lib/kref.c linux-2.6.32.45/lib/kref.c
67005--- linux-2.6.32.45/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
67006+++ linux-2.6.32.45/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
67007@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
67008 */
67009 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67010 {
67011- WARN_ON(release == NULL);
67012+ BUG_ON(release == NULL);
67013 WARN_ON(release == (void (*)(struct kref *))kfree);
67014
67015 if (atomic_dec_and_test(&kref->refcount)) {
67016diff -urNp linux-2.6.32.45/lib/parser.c linux-2.6.32.45/lib/parser.c
67017--- linux-2.6.32.45/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
67018+++ linux-2.6.32.45/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
67019@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
67020 char *buf;
67021 int ret;
67022
67023- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
67024+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
67025 if (!buf)
67026 return -ENOMEM;
67027 memcpy(buf, s->from, s->to - s->from);
67028diff -urNp linux-2.6.32.45/lib/radix-tree.c linux-2.6.32.45/lib/radix-tree.c
67029--- linux-2.6.32.45/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
67030+++ linux-2.6.32.45/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
67031@@ -81,7 +81,7 @@ struct radix_tree_preload {
67032 int nr;
67033 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67034 };
67035-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67036+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67037
67038 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
67039 {
67040diff -urNp linux-2.6.32.45/lib/random32.c linux-2.6.32.45/lib/random32.c
67041--- linux-2.6.32.45/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
67042+++ linux-2.6.32.45/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
67043@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
67044 */
67045 static inline u32 __seed(u32 x, u32 m)
67046 {
67047- return (x < m) ? x + m : x;
67048+ return (x <= m) ? x + m + 1 : x;
67049 }
67050
67051 /**
67052diff -urNp linux-2.6.32.45/lib/vsprintf.c linux-2.6.32.45/lib/vsprintf.c
67053--- linux-2.6.32.45/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
67054+++ linux-2.6.32.45/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
67055@@ -16,6 +16,9 @@
67056 * - scnprintf and vscnprintf
67057 */
67058
67059+#ifdef CONFIG_GRKERNSEC_HIDESYM
67060+#define __INCLUDED_BY_HIDESYM 1
67061+#endif
67062 #include <stdarg.h>
67063 #include <linux/module.h>
67064 #include <linux/types.h>
67065@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
67066 return buf;
67067 }
67068
67069-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
67070+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
67071 {
67072 int len, i;
67073
67074 if ((unsigned long)s < PAGE_SIZE)
67075- s = "<NULL>";
67076+ s = "(null)";
67077
67078 len = strnlen(s, spec.precision);
67079
67080@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
67081 unsigned long value = (unsigned long) ptr;
67082 #ifdef CONFIG_KALLSYMS
67083 char sym[KSYM_SYMBOL_LEN];
67084- if (ext != 'f' && ext != 's')
67085+ if (ext != 'f' && ext != 's' && ext != 'a')
67086 sprint_symbol(sym, value);
67087 else
67088 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67089@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
67090 * - 'f' For simple symbolic function names without offset
67091 * - 'S' For symbolic direct pointers with offset
67092 * - 's' For symbolic direct pointers without offset
67093+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67094+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67095 * - 'R' For a struct resource pointer, it prints the range of
67096 * addresses (not the name nor the flags)
67097 * - 'M' For a 6-byte MAC address, it prints the address in the
67098@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
67099 struct printf_spec spec)
67100 {
67101 if (!ptr)
67102- return string(buf, end, "(null)", spec);
67103+ return string(buf, end, "(nil)", spec);
67104
67105 switch (*fmt) {
67106 case 'F':
67107@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
67108 case 's':
67109 /* Fallthrough */
67110 case 'S':
67111+#ifdef CONFIG_GRKERNSEC_HIDESYM
67112+ break;
67113+#else
67114+ return symbol_string(buf, end, ptr, spec, *fmt);
67115+#endif
67116+ case 'a':
67117+ /* Fallthrough */
67118+ case 'A':
67119 return symbol_string(buf, end, ptr, spec, *fmt);
67120 case 'R':
67121 return resource_string(buf, end, ptr, spec);
67122@@ -1445,7 +1458,7 @@ do { \
67123 size_t len;
67124 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
67125 || (unsigned long)save_str < PAGE_SIZE)
67126- save_str = "<NULL>";
67127+ save_str = "(null)";
67128 len = strlen(save_str);
67129 if (str + len + 1 < end)
67130 memcpy(str, save_str, len + 1);
67131@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
67132 typeof(type) value; \
67133 if (sizeof(type) == 8) { \
67134 args = PTR_ALIGN(args, sizeof(u32)); \
67135- *(u32 *)&value = *(u32 *)args; \
67136- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67137+ *(u32 *)&value = *(const u32 *)args; \
67138+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67139 } else { \
67140 args = PTR_ALIGN(args, sizeof(type)); \
67141- value = *(typeof(type) *)args; \
67142+ value = *(const typeof(type) *)args; \
67143 } \
67144 args += sizeof(type); \
67145 value; \
67146@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
67147 const char *str_arg = args;
67148 size_t len = strlen(str_arg);
67149 args += len + 1;
67150- str = string(str, end, (char *)str_arg, spec);
67151+ str = string(str, end, str_arg, spec);
67152 break;
67153 }
67154
67155diff -urNp linux-2.6.32.45/localversion-grsec linux-2.6.32.45/localversion-grsec
67156--- linux-2.6.32.45/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67157+++ linux-2.6.32.45/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
67158@@ -0,0 +1 @@
67159+-grsec
67160diff -urNp linux-2.6.32.45/Makefile linux-2.6.32.45/Makefile
67161--- linux-2.6.32.45/Makefile 2011-08-16 20:37:25.000000000 -0400
67162+++ linux-2.6.32.45/Makefile 2011-08-21 19:35:55.000000000 -0400
67163@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67164
67165 HOSTCC = gcc
67166 HOSTCXX = g++
67167-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
67168-HOSTCXXFLAGS = -O2
67169+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
67170+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
67171+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
67172
67173 # Decide whether to build built-in, modular, or both.
67174 # Normally, just do built-in.
67175@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
67176 KBUILD_CPPFLAGS := -D__KERNEL__
67177
67178 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
67179+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
67180 -fno-strict-aliasing -fno-common \
67181 -Werror-implicit-function-declaration \
67182 -Wno-format-security \
67183 -fno-delete-null-pointer-checks
67184+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
67185 KBUILD_AFLAGS := -D__ASSEMBLY__
67186
67187 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
67188@@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67189 # Rules shared between *config targets and build targets
67190
67191 # Basic helpers built in scripts/
67192-PHONY += scripts_basic
67193-scripts_basic:
67194+PHONY += scripts_basic gcc-plugins
67195+scripts_basic: gcc-plugins
67196 $(Q)$(MAKE) $(build)=scripts/basic
67197
67198 # To avoid any implicit rule to kick in, define an empty command.
67199@@ -403,7 +406,7 @@ endif
67200 # of make so .config is not included in this case either (for *config).
67201
67202 no-dot-config-targets := clean mrproper distclean \
67203- cscope TAGS tags help %docs check% \
67204+ cscope gtags TAGS tags help %docs check% \
67205 include/linux/version.h headers_% \
67206 kernelrelease kernelversion
67207
67208@@ -526,6 +529,25 @@ else
67209 KBUILD_CFLAGS += -O2
67210 endif
67211
67212+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
67213+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
67214+ifdef CONFIG_PAX_MEMORY_STACKLEAK
67215+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67216+endif
67217+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
67218+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
67219+gcc-plugins:
67220+ $(Q)$(MAKE) $(build)=tools/gcc
67221+else
67222+gcc-plugins:
67223+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67224+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
67225+else
67226+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
67227+endif
67228+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67229+endif
67230+
67231 include $(srctree)/arch/$(SRCARCH)/Makefile
67232
67233 ifneq ($(CONFIG_FRAME_WARN),0)
67234@@ -644,7 +666,7 @@ export mod_strip_cmd
67235
67236
67237 ifeq ($(KBUILD_EXTMOD),)
67238-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67239+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67240
67241 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67242 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67243@@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
67244 endif
67245
67246 # prepare2 creates a makefile if using a separate output directory
67247-prepare2: prepare3 outputmakefile
67248+prepare2: prepare3 outputmakefile gcc-plugins
67249
67250 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
67251 include/asm include/config/auto.conf
67252@@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
67253 include/linux/autoconf.h include/linux/version.h \
67254 include/linux/utsrelease.h \
67255 include/linux/bounds.h include/asm*/asm-offsets.h \
67256- Module.symvers Module.markers tags TAGS cscope*
67257+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
67258
67259 # clean - Delete most, but leave enough to build external modules
67260 #
67261@@ -1289,6 +1311,7 @@ help:
67262 @echo ' modules_prepare - Set up for building external modules'
67263 @echo ' tags/TAGS - Generate tags file for editors'
67264 @echo ' cscope - Generate cscope index'
67265+ @echo ' gtags - Generate GNU GLOBAL index'
67266 @echo ' kernelrelease - Output the release version string'
67267 @echo ' kernelversion - Output the version stored in Makefile'
67268 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
67269@@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
67270 $(call cmd,rmdirs)
67271 $(call cmd,rmfiles)
67272 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
67273- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
67274+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
67275 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67276 -o -name '*.gcno' \) -type f -print | xargs rm -f
67277
67278@@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67279 quiet_cmd_tags = GEN $@
67280 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67281
67282-tags TAGS cscope: FORCE
67283+tags TAGS cscope gtags: FORCE
67284 $(call cmd,tags)
67285
67286 # Scripts to check various things for consistency
67287diff -urNp linux-2.6.32.45/mm/backing-dev.c linux-2.6.32.45/mm/backing-dev.c
67288--- linux-2.6.32.45/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67289+++ linux-2.6.32.45/mm/backing-dev.c 2011-08-11 19:48:17.000000000 -0400
67290@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing
67291 list_add_tail_rcu(&wb->list, &bdi->wb_list);
67292 spin_unlock(&bdi->wb_lock);
67293
67294- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
67295+ tsk->flags |= PF_SWAPWRITE;
67296 set_freezable();
67297
67298 /*
67299@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67300 * Add the default flusher task that gets created for any bdi
67301 * that has dirty data pending writeout
67302 */
67303-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67304+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67305 {
67306 if (!bdi_cap_writeback_dirty(bdi))
67307 return;
67308diff -urNp linux-2.6.32.45/mm/filemap.c linux-2.6.32.45/mm/filemap.c
67309--- linux-2.6.32.45/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67310+++ linux-2.6.32.45/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67311@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67312 struct address_space *mapping = file->f_mapping;
67313
67314 if (!mapping->a_ops->readpage)
67315- return -ENOEXEC;
67316+ return -ENODEV;
67317 file_accessed(file);
67318 vma->vm_ops = &generic_file_vm_ops;
67319 vma->vm_flags |= VM_CAN_NONLINEAR;
67320@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67321 *pos = i_size_read(inode);
67322
67323 if (limit != RLIM_INFINITY) {
67324+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67325 if (*pos >= limit) {
67326 send_sig(SIGXFSZ, current, 0);
67327 return -EFBIG;
67328diff -urNp linux-2.6.32.45/mm/fremap.c linux-2.6.32.45/mm/fremap.c
67329--- linux-2.6.32.45/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67330+++ linux-2.6.32.45/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67331@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67332 retry:
67333 vma = find_vma(mm, start);
67334
67335+#ifdef CONFIG_PAX_SEGMEXEC
67336+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67337+ goto out;
67338+#endif
67339+
67340 /*
67341 * Make sure the vma is shared, that it supports prefaulting,
67342 * and that the remapped range is valid and fully within
67343@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67344 /*
67345 * drop PG_Mlocked flag for over-mapped range
67346 */
67347- unsigned int saved_flags = vma->vm_flags;
67348+ unsigned long saved_flags = vma->vm_flags;
67349 munlock_vma_pages_range(vma, start, start + size);
67350 vma->vm_flags = saved_flags;
67351 }
67352diff -urNp linux-2.6.32.45/mm/highmem.c linux-2.6.32.45/mm/highmem.c
67353--- linux-2.6.32.45/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67354+++ linux-2.6.32.45/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67355@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67356 * So no dangers, even with speculative execution.
67357 */
67358 page = pte_page(pkmap_page_table[i]);
67359+ pax_open_kernel();
67360 pte_clear(&init_mm, (unsigned long)page_address(page),
67361 &pkmap_page_table[i]);
67362-
67363+ pax_close_kernel();
67364 set_page_address(page, NULL);
67365 need_flush = 1;
67366 }
67367@@ -177,9 +178,11 @@ start:
67368 }
67369 }
67370 vaddr = PKMAP_ADDR(last_pkmap_nr);
67371+
67372+ pax_open_kernel();
67373 set_pte_at(&init_mm, vaddr,
67374 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67375-
67376+ pax_close_kernel();
67377 pkmap_count[last_pkmap_nr] = 1;
67378 set_page_address(page, (void *)vaddr);
67379
67380diff -urNp linux-2.6.32.45/mm/hugetlb.c linux-2.6.32.45/mm/hugetlb.c
67381--- linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67382+++ linux-2.6.32.45/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67383@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67384 return 1;
67385 }
67386
67387+#ifdef CONFIG_PAX_SEGMEXEC
67388+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67389+{
67390+ struct mm_struct *mm = vma->vm_mm;
67391+ struct vm_area_struct *vma_m;
67392+ unsigned long address_m;
67393+ pte_t *ptep_m;
67394+
67395+ vma_m = pax_find_mirror_vma(vma);
67396+ if (!vma_m)
67397+ return;
67398+
67399+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67400+ address_m = address + SEGMEXEC_TASK_SIZE;
67401+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67402+ get_page(page_m);
67403+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67404+}
67405+#endif
67406+
67407 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67408 unsigned long address, pte_t *ptep, pte_t pte,
67409 struct page *pagecache_page)
67410@@ -2004,6 +2024,11 @@ retry_avoidcopy:
67411 huge_ptep_clear_flush(vma, address, ptep);
67412 set_huge_pte_at(mm, address, ptep,
67413 make_huge_pte(vma, new_page, 1));
67414+
67415+#ifdef CONFIG_PAX_SEGMEXEC
67416+ pax_mirror_huge_pte(vma, address, new_page);
67417+#endif
67418+
67419 /* Make the old page be freed below */
67420 new_page = old_page;
67421 }
67422@@ -2135,6 +2160,10 @@ retry:
67423 && (vma->vm_flags & VM_SHARED)));
67424 set_huge_pte_at(mm, address, ptep, new_pte);
67425
67426+#ifdef CONFIG_PAX_SEGMEXEC
67427+ pax_mirror_huge_pte(vma, address, page);
67428+#endif
67429+
67430 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67431 /* Optimization, do the COW without a second fault */
67432 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67433@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67434 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67435 struct hstate *h = hstate_vma(vma);
67436
67437+#ifdef CONFIG_PAX_SEGMEXEC
67438+ struct vm_area_struct *vma_m;
67439+
67440+ vma_m = pax_find_mirror_vma(vma);
67441+ if (vma_m) {
67442+ unsigned long address_m;
67443+
67444+ if (vma->vm_start > vma_m->vm_start) {
67445+ address_m = address;
67446+ address -= SEGMEXEC_TASK_SIZE;
67447+ vma = vma_m;
67448+ h = hstate_vma(vma);
67449+ } else
67450+ address_m = address + SEGMEXEC_TASK_SIZE;
67451+
67452+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67453+ return VM_FAULT_OOM;
67454+ address_m &= HPAGE_MASK;
67455+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67456+ }
67457+#endif
67458+
67459 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67460 if (!ptep)
67461 return VM_FAULT_OOM;
67462diff -urNp linux-2.6.32.45/mm/internal.h linux-2.6.32.45/mm/internal.h
67463--- linux-2.6.32.45/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67464+++ linux-2.6.32.45/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67465@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67466 * in mm/page_alloc.c
67467 */
67468 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67469+extern void free_compound_page(struct page *page);
67470 extern void prep_compound_page(struct page *page, unsigned long order);
67471
67472
67473diff -urNp linux-2.6.32.45/mm/Kconfig linux-2.6.32.45/mm/Kconfig
67474--- linux-2.6.32.45/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67475+++ linux-2.6.32.45/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67476@@ -228,7 +228,7 @@ config KSM
67477 config DEFAULT_MMAP_MIN_ADDR
67478 int "Low address space to protect from user allocation"
67479 depends on MMU
67480- default 4096
67481+ default 65536
67482 help
67483 This is the portion of low virtual memory which should be protected
67484 from userspace allocation. Keeping a user from writing to low pages
67485diff -urNp linux-2.6.32.45/mm/kmemleak.c linux-2.6.32.45/mm/kmemleak.c
67486--- linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67487+++ linux-2.6.32.45/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67488@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67489
67490 for (i = 0; i < object->trace_len; i++) {
67491 void *ptr = (void *)object->trace[i];
67492- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67493+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67494 }
67495 }
67496
67497diff -urNp linux-2.6.32.45/mm/maccess.c linux-2.6.32.45/mm/maccess.c
67498--- linux-2.6.32.45/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67499+++ linux-2.6.32.45/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67500@@ -14,7 +14,7 @@
67501 * Safely read from address @src to the buffer at @dst. If a kernel fault
67502 * happens, handle that and return -EFAULT.
67503 */
67504-long probe_kernel_read(void *dst, void *src, size_t size)
67505+long probe_kernel_read(void *dst, const void *src, size_t size)
67506 {
67507 long ret;
67508 mm_segment_t old_fs = get_fs();
67509@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67510 * Safely write to address @dst from the buffer at @src. If a kernel fault
67511 * happens, handle that and return -EFAULT.
67512 */
67513-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67514+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67515 {
67516 long ret;
67517 mm_segment_t old_fs = get_fs();
67518diff -urNp linux-2.6.32.45/mm/madvise.c linux-2.6.32.45/mm/madvise.c
67519--- linux-2.6.32.45/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67520+++ linux-2.6.32.45/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67521@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67522 pgoff_t pgoff;
67523 unsigned long new_flags = vma->vm_flags;
67524
67525+#ifdef CONFIG_PAX_SEGMEXEC
67526+ struct vm_area_struct *vma_m;
67527+#endif
67528+
67529 switch (behavior) {
67530 case MADV_NORMAL:
67531 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67532@@ -103,6 +107,13 @@ success:
67533 /*
67534 * vm_flags is protected by the mmap_sem held in write mode.
67535 */
67536+
67537+#ifdef CONFIG_PAX_SEGMEXEC
67538+ vma_m = pax_find_mirror_vma(vma);
67539+ if (vma_m)
67540+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67541+#endif
67542+
67543 vma->vm_flags = new_flags;
67544
67545 out:
67546@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67547 struct vm_area_struct ** prev,
67548 unsigned long start, unsigned long end)
67549 {
67550+
67551+#ifdef CONFIG_PAX_SEGMEXEC
67552+ struct vm_area_struct *vma_m;
67553+#endif
67554+
67555 *prev = vma;
67556 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67557 return -EINVAL;
67558@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67559 zap_page_range(vma, start, end - start, &details);
67560 } else
67561 zap_page_range(vma, start, end - start, NULL);
67562+
67563+#ifdef CONFIG_PAX_SEGMEXEC
67564+ vma_m = pax_find_mirror_vma(vma);
67565+ if (vma_m) {
67566+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67567+ struct zap_details details = {
67568+ .nonlinear_vma = vma_m,
67569+ .last_index = ULONG_MAX,
67570+ };
67571+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67572+ } else
67573+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67574+ }
67575+#endif
67576+
67577 return 0;
67578 }
67579
67580@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67581 if (end < start)
67582 goto out;
67583
67584+#ifdef CONFIG_PAX_SEGMEXEC
67585+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67586+ if (end > SEGMEXEC_TASK_SIZE)
67587+ goto out;
67588+ } else
67589+#endif
67590+
67591+ if (end > TASK_SIZE)
67592+ goto out;
67593+
67594 error = 0;
67595 if (end == start)
67596 goto out;
67597diff -urNp linux-2.6.32.45/mm/memory.c linux-2.6.32.45/mm/memory.c
67598--- linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67599+++ linux-2.6.32.45/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67600@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67601 return;
67602
67603 pmd = pmd_offset(pud, start);
67604+
67605+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67606 pud_clear(pud);
67607 pmd_free_tlb(tlb, pmd, start);
67608+#endif
67609+
67610 }
67611
67612 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67613@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67614 if (end - 1 > ceiling - 1)
67615 return;
67616
67617+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67618 pud = pud_offset(pgd, start);
67619 pgd_clear(pgd);
67620 pud_free_tlb(tlb, pud, start);
67621+#endif
67622+
67623 }
67624
67625 /*
67626@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67627 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67628 i = 0;
67629
67630- do {
67631+ while (nr_pages) {
67632 struct vm_area_struct *vma;
67633
67634- vma = find_extend_vma(mm, start);
67635+ vma = find_vma(mm, start);
67636 if (!vma && in_gate_area(tsk, start)) {
67637 unsigned long pg = start & PAGE_MASK;
67638 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67639@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67640 continue;
67641 }
67642
67643- if (!vma ||
67644+ if (!vma || start < vma->vm_start ||
67645 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67646 !(vm_flags & vma->vm_flags))
67647 return i ? : -EFAULT;
67648@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67649 start += PAGE_SIZE;
67650 nr_pages--;
67651 } while (nr_pages && start < vma->vm_end);
67652- } while (nr_pages);
67653+ }
67654 return i;
67655 }
67656
67657@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
67658 page_add_file_rmap(page);
67659 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67660
67661+#ifdef CONFIG_PAX_SEGMEXEC
67662+ pax_mirror_file_pte(vma, addr, page, ptl);
67663+#endif
67664+
67665 retval = 0;
67666 pte_unmap_unlock(pte, ptl);
67667 return retval;
67668@@ -1560,10 +1571,22 @@ out:
67669 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67670 struct page *page)
67671 {
67672+
67673+#ifdef CONFIG_PAX_SEGMEXEC
67674+ struct vm_area_struct *vma_m;
67675+#endif
67676+
67677 if (addr < vma->vm_start || addr >= vma->vm_end)
67678 return -EFAULT;
67679 if (!page_count(page))
67680 return -EINVAL;
67681+
67682+#ifdef CONFIG_PAX_SEGMEXEC
67683+ vma_m = pax_find_mirror_vma(vma);
67684+ if (vma_m)
67685+ vma_m->vm_flags |= VM_INSERTPAGE;
67686+#endif
67687+
67688 vma->vm_flags |= VM_INSERTPAGE;
67689 return insert_page(vma, addr, page, vma->vm_page_prot);
67690 }
67691@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
67692 unsigned long pfn)
67693 {
67694 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67695+ BUG_ON(vma->vm_mirror);
67696
67697 if (addr < vma->vm_start || addr >= vma->vm_end)
67698 return -EFAULT;
67699@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
67700 copy_user_highpage(dst, src, va, vma);
67701 }
67702
67703+#ifdef CONFIG_PAX_SEGMEXEC
67704+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
67705+{
67706+ struct mm_struct *mm = vma->vm_mm;
67707+ spinlock_t *ptl;
67708+ pte_t *pte, entry;
67709+
67710+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
67711+ entry = *pte;
67712+ if (!pte_present(entry)) {
67713+ if (!pte_none(entry)) {
67714+ BUG_ON(pte_file(entry));
67715+ free_swap_and_cache(pte_to_swp_entry(entry));
67716+ pte_clear_not_present_full(mm, address, pte, 0);
67717+ }
67718+ } else {
67719+ struct page *page;
67720+
67721+ flush_cache_page(vma, address, pte_pfn(entry));
67722+ entry = ptep_clear_flush(vma, address, pte);
67723+ BUG_ON(pte_dirty(entry));
67724+ page = vm_normal_page(vma, address, entry);
67725+ if (page) {
67726+ update_hiwater_rss(mm);
67727+ if (PageAnon(page))
67728+ dec_mm_counter(mm, anon_rss);
67729+ else
67730+ dec_mm_counter(mm, file_rss);
67731+ page_remove_rmap(page);
67732+ page_cache_release(page);
67733+ }
67734+ }
67735+ pte_unmap_unlock(pte, ptl);
67736+}
67737+
67738+/* PaX: if vma is mirrored, synchronize the mirror's PTE
67739+ *
67740+ * the ptl of the lower mapped page is held on entry and is not released on exit
67741+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
67742+ */
67743+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67744+{
67745+ struct mm_struct *mm = vma->vm_mm;
67746+ unsigned long address_m;
67747+ spinlock_t *ptl_m;
67748+ struct vm_area_struct *vma_m;
67749+ pmd_t *pmd_m;
67750+ pte_t *pte_m, entry_m;
67751+
67752+ BUG_ON(!page_m || !PageAnon(page_m));
67753+
67754+ vma_m = pax_find_mirror_vma(vma);
67755+ if (!vma_m)
67756+ return;
67757+
67758+ BUG_ON(!PageLocked(page_m));
67759+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67760+ address_m = address + SEGMEXEC_TASK_SIZE;
67761+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67762+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67763+ ptl_m = pte_lockptr(mm, pmd_m);
67764+ if (ptl != ptl_m) {
67765+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67766+ if (!pte_none(*pte_m))
67767+ goto out;
67768+ }
67769+
67770+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67771+ page_cache_get(page_m);
67772+ page_add_anon_rmap(page_m, vma_m, address_m);
67773+ inc_mm_counter(mm, anon_rss);
67774+ set_pte_at(mm, address_m, pte_m, entry_m);
67775+ update_mmu_cache(vma_m, address_m, entry_m);
67776+out:
67777+ if (ptl != ptl_m)
67778+ spin_unlock(ptl_m);
67779+ pte_unmap_nested(pte_m);
67780+ unlock_page(page_m);
67781+}
67782+
67783+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
67784+{
67785+ struct mm_struct *mm = vma->vm_mm;
67786+ unsigned long address_m;
67787+ spinlock_t *ptl_m;
67788+ struct vm_area_struct *vma_m;
67789+ pmd_t *pmd_m;
67790+ pte_t *pte_m, entry_m;
67791+
67792+ BUG_ON(!page_m || PageAnon(page_m));
67793+
67794+ vma_m = pax_find_mirror_vma(vma);
67795+ if (!vma_m)
67796+ return;
67797+
67798+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67799+ address_m = address + SEGMEXEC_TASK_SIZE;
67800+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67801+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67802+ ptl_m = pte_lockptr(mm, pmd_m);
67803+ if (ptl != ptl_m) {
67804+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67805+ if (!pte_none(*pte_m))
67806+ goto out;
67807+ }
67808+
67809+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
67810+ page_cache_get(page_m);
67811+ page_add_file_rmap(page_m);
67812+ inc_mm_counter(mm, file_rss);
67813+ set_pte_at(mm, address_m, pte_m, entry_m);
67814+ update_mmu_cache(vma_m, address_m, entry_m);
67815+out:
67816+ if (ptl != ptl_m)
67817+ spin_unlock(ptl_m);
67818+ pte_unmap_nested(pte_m);
67819+}
67820+
67821+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
67822+{
67823+ struct mm_struct *mm = vma->vm_mm;
67824+ unsigned long address_m;
67825+ spinlock_t *ptl_m;
67826+ struct vm_area_struct *vma_m;
67827+ pmd_t *pmd_m;
67828+ pte_t *pte_m, entry_m;
67829+
67830+ vma_m = pax_find_mirror_vma(vma);
67831+ if (!vma_m)
67832+ return;
67833+
67834+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67835+ address_m = address + SEGMEXEC_TASK_SIZE;
67836+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
67837+ pte_m = pte_offset_map_nested(pmd_m, address_m);
67838+ ptl_m = pte_lockptr(mm, pmd_m);
67839+ if (ptl != ptl_m) {
67840+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
67841+ if (!pte_none(*pte_m))
67842+ goto out;
67843+ }
67844+
67845+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
67846+ set_pte_at(mm, address_m, pte_m, entry_m);
67847+out:
67848+ if (ptl != ptl_m)
67849+ spin_unlock(ptl_m);
67850+ pte_unmap_nested(pte_m);
67851+}
67852+
67853+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
67854+{
67855+ struct page *page_m;
67856+ pte_t entry;
67857+
67858+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
67859+ goto out;
67860+
67861+ entry = *pte;
67862+ page_m = vm_normal_page(vma, address, entry);
67863+ if (!page_m)
67864+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
67865+ else if (PageAnon(page_m)) {
67866+ if (pax_find_mirror_vma(vma)) {
67867+ pte_unmap_unlock(pte, ptl);
67868+ lock_page(page_m);
67869+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
67870+ if (pte_same(entry, *pte))
67871+ pax_mirror_anon_pte(vma, address, page_m, ptl);
67872+ else
67873+ unlock_page(page_m);
67874+ }
67875+ } else
67876+ pax_mirror_file_pte(vma, address, page_m, ptl);
67877+
67878+out:
67879+ pte_unmap_unlock(pte, ptl);
67880+}
67881+#endif
67882+
67883 /*
67884 * This routine handles present pages, when users try to write
67885 * to a shared page. It is done by copying the page to a new address
67886@@ -2156,6 +2360,12 @@ gotten:
67887 */
67888 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67889 if (likely(pte_same(*page_table, orig_pte))) {
67890+
67891+#ifdef CONFIG_PAX_SEGMEXEC
67892+ if (pax_find_mirror_vma(vma))
67893+ BUG_ON(!trylock_page(new_page));
67894+#endif
67895+
67896 if (old_page) {
67897 if (!PageAnon(old_page)) {
67898 dec_mm_counter(mm, file_rss);
67899@@ -2207,6 +2417,10 @@ gotten:
67900 page_remove_rmap(old_page);
67901 }
67902
67903+#ifdef CONFIG_PAX_SEGMEXEC
67904+ pax_mirror_anon_pte(vma, address, new_page, ptl);
67905+#endif
67906+
67907 /* Free the old page.. */
67908 new_page = old_page;
67909 ret |= VM_FAULT_WRITE;
67910@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
67911 swap_free(entry);
67912 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
67913 try_to_free_swap(page);
67914+
67915+#ifdef CONFIG_PAX_SEGMEXEC
67916+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
67917+#endif
67918+
67919 unlock_page(page);
67920
67921 if (flags & FAULT_FLAG_WRITE) {
67922@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
67923
67924 /* No need to invalidate - it was non-present before */
67925 update_mmu_cache(vma, address, pte);
67926+
67927+#ifdef CONFIG_PAX_SEGMEXEC
67928+ pax_mirror_anon_pte(vma, address, page, ptl);
67929+#endif
67930+
67931 unlock:
67932 pte_unmap_unlock(page_table, ptl);
67933 out:
67934@@ -2632,40 +2856,6 @@ out_release:
67935 }
67936
67937 /*
67938- * This is like a special single-page "expand_{down|up}wards()",
67939- * except we must first make sure that 'address{-|+}PAGE_SIZE'
67940- * doesn't hit another vma.
67941- */
67942-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
67943-{
67944- address &= PAGE_MASK;
67945- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
67946- struct vm_area_struct *prev = vma->vm_prev;
67947-
67948- /*
67949- * Is there a mapping abutting this one below?
67950- *
67951- * That's only ok if it's the same stack mapping
67952- * that has gotten split..
67953- */
67954- if (prev && prev->vm_end == address)
67955- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
67956-
67957- expand_stack(vma, address - PAGE_SIZE);
67958- }
67959- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
67960- struct vm_area_struct *next = vma->vm_next;
67961-
67962- /* As VM_GROWSDOWN but s/below/above/ */
67963- if (next && next->vm_start == address + PAGE_SIZE)
67964- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
67965-
67966- expand_upwards(vma, address + PAGE_SIZE);
67967- }
67968- return 0;
67969-}
67970-
67971-/*
67972 * We enter with non-exclusive mmap_sem (to exclude vma changes,
67973 * but allow concurrent faults), and pte mapped but not yet locked.
67974 * We return with mmap_sem still held, but pte unmapped and unlocked.
67975@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
67976 unsigned long address, pte_t *page_table, pmd_t *pmd,
67977 unsigned int flags)
67978 {
67979- struct page *page;
67980+ struct page *page = NULL;
67981 spinlock_t *ptl;
67982 pte_t entry;
67983
67984- pte_unmap(page_table);
67985-
67986- /* Check if we need to add a guard page to the stack */
67987- if (check_stack_guard_page(vma, address) < 0)
67988- return VM_FAULT_SIGBUS;
67989-
67990- /* Use the zero-page for reads */
67991 if (!(flags & FAULT_FLAG_WRITE)) {
67992 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
67993 vma->vm_page_prot));
67994- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
67995+ ptl = pte_lockptr(mm, pmd);
67996+ spin_lock(ptl);
67997 if (!pte_none(*page_table))
67998 goto unlock;
67999 goto setpte;
68000 }
68001
68002 /* Allocate our own private page. */
68003+ pte_unmap(page_table);
68004+
68005 if (unlikely(anon_vma_prepare(vma)))
68006 goto oom;
68007 page = alloc_zeroed_user_highpage_movable(vma, address);
68008@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
68009 if (!pte_none(*page_table))
68010 goto release;
68011
68012+#ifdef CONFIG_PAX_SEGMEXEC
68013+ if (pax_find_mirror_vma(vma))
68014+ BUG_ON(!trylock_page(page));
68015+#endif
68016+
68017 inc_mm_counter(mm, anon_rss);
68018 page_add_new_anon_rmap(page, vma, address);
68019 setpte:
68020@@ -2720,6 +2911,12 @@ setpte:
68021
68022 /* No need to invalidate - it was non-present before */
68023 update_mmu_cache(vma, address, entry);
68024+
68025+#ifdef CONFIG_PAX_SEGMEXEC
68026+ if (page)
68027+ pax_mirror_anon_pte(vma, address, page, ptl);
68028+#endif
68029+
68030 unlock:
68031 pte_unmap_unlock(page_table, ptl);
68032 return 0;
68033@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
68034 */
68035 /* Only go through if we didn't race with anybody else... */
68036 if (likely(pte_same(*page_table, orig_pte))) {
68037+
68038+#ifdef CONFIG_PAX_SEGMEXEC
68039+ if (anon && pax_find_mirror_vma(vma))
68040+ BUG_ON(!trylock_page(page));
68041+#endif
68042+
68043 flush_icache_page(vma, page);
68044 entry = mk_pte(page, vma->vm_page_prot);
68045 if (flags & FAULT_FLAG_WRITE)
68046@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
68047
68048 /* no need to invalidate: a not-present page won't be cached */
68049 update_mmu_cache(vma, address, entry);
68050+
68051+#ifdef CONFIG_PAX_SEGMEXEC
68052+ if (anon)
68053+ pax_mirror_anon_pte(vma, address, page, ptl);
68054+ else
68055+ pax_mirror_file_pte(vma, address, page, ptl);
68056+#endif
68057+
68058 } else {
68059 if (charged)
68060 mem_cgroup_uncharge_page(page);
68061@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
68062 if (flags & FAULT_FLAG_WRITE)
68063 flush_tlb_page(vma, address);
68064 }
68065+
68066+#ifdef CONFIG_PAX_SEGMEXEC
68067+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68068+ return 0;
68069+#endif
68070+
68071 unlock:
68072 pte_unmap_unlock(pte, ptl);
68073 return 0;
68074@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
68075 pmd_t *pmd;
68076 pte_t *pte;
68077
68078+#ifdef CONFIG_PAX_SEGMEXEC
68079+ struct vm_area_struct *vma_m;
68080+#endif
68081+
68082 __set_current_state(TASK_RUNNING);
68083
68084 count_vm_event(PGFAULT);
68085@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
68086 if (unlikely(is_vm_hugetlb_page(vma)))
68087 return hugetlb_fault(mm, vma, address, flags);
68088
68089+#ifdef CONFIG_PAX_SEGMEXEC
68090+ vma_m = pax_find_mirror_vma(vma);
68091+ if (vma_m) {
68092+ unsigned long address_m;
68093+ pgd_t *pgd_m;
68094+ pud_t *pud_m;
68095+ pmd_t *pmd_m;
68096+
68097+ if (vma->vm_start > vma_m->vm_start) {
68098+ address_m = address;
68099+ address -= SEGMEXEC_TASK_SIZE;
68100+ vma = vma_m;
68101+ } else
68102+ address_m = address + SEGMEXEC_TASK_SIZE;
68103+
68104+ pgd_m = pgd_offset(mm, address_m);
68105+ pud_m = pud_alloc(mm, pgd_m, address_m);
68106+ if (!pud_m)
68107+ return VM_FAULT_OOM;
68108+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68109+ if (!pmd_m)
68110+ return VM_FAULT_OOM;
68111+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
68112+ return VM_FAULT_OOM;
68113+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68114+ }
68115+#endif
68116+
68117 pgd = pgd_offset(mm, address);
68118 pud = pud_alloc(mm, pgd, address);
68119 if (!pud)
68120@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
68121 gate_vma.vm_start = FIXADDR_USER_START;
68122 gate_vma.vm_end = FIXADDR_USER_END;
68123 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68124- gate_vma.vm_page_prot = __P101;
68125+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68126 /*
68127 * Make sure the vDSO gets into every core dump.
68128 * Dumping its contents makes post-mortem fully interpretable later
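
Editorial note on the handle_mm_fault() hunk above: under CONFIG_PAX_SEGMEXEC every executable mapping has a mirror displaced by SEGMEXEC_TASK_SIZE, and a fault taken on either copy first allocates the page-table levels for the other so pax_mirror_*_pte() can later install the mirrored entry. The stand-alone C sketch below is an editorial illustration, not part of the patch; the SEGMEXEC_TASK_SIZE value is a placeholder, and only the address arithmetic used to locate the mirror is shown.

/* sketch: locate the SEGMEXEC mirror of a faulting address (illustration only) */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL   /* placeholder; the real value is arch-defined */

static unsigned long segmexec_mirror(unsigned long address)
{
        /* faults in the upper (code) half map back to the lower (data) half,
         * and vice versa, as in the handle_mm_fault() hunk */
        if (address >= SEGMEXEC_TASK_SIZE)
                return address - SEGMEXEC_TASK_SIZE;
        return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
        unsigned long addr = 0x08048000UL;

        printf("%#lx mirrors to %#lx\n", addr, segmexec_mirror(addr));
        printf("%#lx mirrors back to %#lx\n",
               segmexec_mirror(addr), segmexec_mirror(segmexec_mirror(addr)));
        return 0;
}
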
68129diff -urNp linux-2.6.32.45/mm/memory-failure.c linux-2.6.32.45/mm/memory-failure.c
68130--- linux-2.6.32.45/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
68131+++ linux-2.6.32.45/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
68132@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
68133
68134 int sysctl_memory_failure_recovery __read_mostly = 1;
68135
68136-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68137+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68138
68139 /*
68140 * Send all the processes who have the page mapped an ``action optional''
68141@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
68142 return 0;
68143 }
68144
68145- atomic_long_add(1, &mce_bad_pages);
68146+ atomic_long_add_unchecked(1, &mce_bad_pages);
68147
68148 /*
68149 * We need/can do nothing about count=0 pages.
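
Editorial note on the mm/memory-failure.c hunk above: mce_bad_pages is a pure statistics counter, so the patch switches it to atomic_long_unchecked_t, the variant used throughout this patch for counters that are deliberately exempt from the PaX overflow checking applied to ordinary atomics. The stand-in below is an editorial illustration only; it is not actually atomic, and the real type lives in the patched asm/atomic headers.

/* sketch: an "unchecked" statistics counter (illustration only, not atomic) */
#include <stdio.h>

typedef struct { long counter; } atomic_long_unchecked_t;

static void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
        v->counter += i;        /* statistics only: wrap-around here is harmless */
}

int main(void)
{
        atomic_long_unchecked_t mce_bad_pages = { 0 };

        atomic_long_add_unchecked(1, &mce_bad_pages);
        printf("bad pages seen: %ld\n", mce_bad_pages.counter);
        return 0;
}
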
68150diff -urNp linux-2.6.32.45/mm/mempolicy.c linux-2.6.32.45/mm/mempolicy.c
68151--- linux-2.6.32.45/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
68152+++ linux-2.6.32.45/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
68153@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
68154 struct vm_area_struct *next;
68155 int err;
68156
68157+#ifdef CONFIG_PAX_SEGMEXEC
68158+ struct vm_area_struct *vma_m;
68159+#endif
68160+
68161 err = 0;
68162 for (; vma && vma->vm_start < end; vma = next) {
68163 next = vma->vm_next;
68164@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
68165 err = policy_vma(vma, new);
68166 if (err)
68167 break;
68168+
68169+#ifdef CONFIG_PAX_SEGMEXEC
68170+ vma_m = pax_find_mirror_vma(vma);
68171+ if (vma_m) {
68172+ err = policy_vma(vma_m, new);
68173+ if (err)
68174+ break;
68175+ }
68176+#endif
68177+
68178 }
68179 return err;
68180 }
68181@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
68182
68183 if (end < start)
68184 return -EINVAL;
68185+
68186+#ifdef CONFIG_PAX_SEGMEXEC
68187+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68188+ if (end > SEGMEXEC_TASK_SIZE)
68189+ return -EINVAL;
68190+ } else
68191+#endif
68192+
68193+ if (end > TASK_SIZE)
68194+ return -EINVAL;
68195+
68196 if (end == start)
68197 return 0;
68198
68199@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68200 if (!mm)
68201 return -EINVAL;
68202
68203+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68204+ if (mm != current->mm &&
68205+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68206+ err = -EPERM;
68207+ goto out;
68208+ }
68209+#endif
68210+
68211 /*
68212 * Check if this process has the right to modify the specified
68213 * process. The right exists if the process has administrative
68214@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68215 rcu_read_lock();
68216 tcred = __task_cred(task);
68217 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68218- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68219- !capable(CAP_SYS_NICE)) {
68220+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68221 rcu_read_unlock();
68222 err = -EPERM;
68223 goto out;
68224@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
68225
68226 if (file) {
68227 seq_printf(m, " file=");
68228- seq_path(m, &file->f_path, "\n\t= ");
68229+ seq_path(m, &file->f_path, "\n\t\\= ");
68230 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68231 seq_printf(m, " heap");
68232 } else if (vma->vm_start <= mm->start_stack &&
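
Editorial note on the do_mbind() hunk above: with SEGMEXEC active only the lower half of the address space may be named directly (the upper half holds the mirrors), so the end of the requested range is checked against SEGMEXEC_TASK_SIZE rather than TASK_SIZE; sys_mprotect() receives the same treatment later in this patch. The sketch below is an editorial illustration of that bound check; the TASK_SIZE and SEGMEXEC_TASK_SIZE values are placeholders.

/* sketch: range bound check under SEGMEXEC (illustration only) */
#include <stdbool.h>
#include <stdio.h>

#define TASK_SIZE          0xc0000000UL          /* placeholder, i386-style 3 GiB */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)       /* lower, data half under SEGMEXEC */

static bool range_allowed(bool segmexec, unsigned long end)
{
        return end <= (segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE);
}

int main(void)
{
        unsigned long end = 0x70000000UL;

        printf("end=%#lx under SEGMEXEC: %s\n", end,
               range_allowed(true, end) ? "ok" : "rejected (-EINVAL)");
        printf("end=%#lx without it:     %s\n", end,
               range_allowed(false, end) ? "ok" : "rejected (-EINVAL)");
        return 0;
}
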
68233diff -urNp linux-2.6.32.45/mm/migrate.c linux-2.6.32.45/mm/migrate.c
68234--- linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
68235+++ linux-2.6.32.45/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
68236@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
68237 unsigned long chunk_start;
68238 int err;
68239
68240+ pax_track_stack();
68241+
68242 task_nodes = cpuset_mems_allowed(task);
68243
68244 err = -ENOMEM;
68245@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68246 if (!mm)
68247 return -EINVAL;
68248
68249+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68250+ if (mm != current->mm &&
68251+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68252+ err = -EPERM;
68253+ goto out;
68254+ }
68255+#endif
68256+
68257 /*
68258 * Check if this process has the right to modify the specified
68259 * process. The right exists if the process has administrative
68260@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68261 rcu_read_lock();
68262 tcred = __task_cred(task);
68263 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68264- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68265- !capable(CAP_SYS_NICE)) {
68266+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68267 rcu_read_unlock();
68268 err = -EPERM;
68269 goto out;
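
Editorial note on the migrate_pages()/move_pages() hunks above: with GRKERNSEC_PROC_MEMMAP enabled, operating on another task's mm is refused outright whenever that mm is protected by PAX_RANDMMAP or PAX_SEGMEXEC, so its randomized layout cannot be probed or rearranged from outside. The sketch below reduces that guard to a predicate; it is an editorial illustration, and the flag values are placeholders (the real MF_PAX_* bits are defined elsewhere in this patch).

/* sketch: cross-process mm access guard (illustration only) */
#include <stdbool.h>
#include <stdio.h>

#define MF_PAX_SEGMEXEC 0x01U   /* placeholder flag values */
#define MF_PAX_RANDMMAP 0x02U

static bool cross_mm_allowed(bool same_mm, unsigned int pax_flags)
{
        if (same_mm)
                return true;    /* a task may always act on its own mm */
        return !(pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}

int main(void)
{
        printf("other mm, RANDMMAP set: %s\n",
               cross_mm_allowed(false, MF_PAX_RANDMMAP) ? "allowed" : "-EPERM");
        printf("own mm,   RANDMMAP set: %s\n",
               cross_mm_allowed(true, MF_PAX_RANDMMAP) ? "allowed" : "-EPERM");
        return 0;
}
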
68270diff -urNp linux-2.6.32.45/mm/mlock.c linux-2.6.32.45/mm/mlock.c
68271--- linux-2.6.32.45/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
68272+++ linux-2.6.32.45/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
68273@@ -13,6 +13,7 @@
68274 #include <linux/pagemap.h>
68275 #include <linux/mempolicy.h>
68276 #include <linux/syscalls.h>
68277+#include <linux/security.h>
68278 #include <linux/sched.h>
68279 #include <linux/module.h>
68280 #include <linux/rmap.h>
68281@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68282 }
68283 }
68284
68285-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68286-{
68287- return (vma->vm_flags & VM_GROWSDOWN) &&
68288- (vma->vm_start == addr) &&
68289- !vma_stack_continue(vma->vm_prev, addr);
68290-}
68291-
68292 /**
68293 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68294 * @vma: target vma
68295@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68296 if (vma->vm_flags & VM_WRITE)
68297 gup_flags |= FOLL_WRITE;
68298
68299- /* We don't try to access the guard page of a stack vma */
68300- if (stack_guard_page(vma, start)) {
68301- addr += PAGE_SIZE;
68302- nr_pages--;
68303- }
68304-
68305 while (nr_pages > 0) {
68306 int i;
68307
68308@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68309 {
68310 unsigned long nstart, end, tmp;
68311 struct vm_area_struct * vma, * prev;
68312- int error;
68313+ int error = -EINVAL;
68314
68315 len = PAGE_ALIGN(len);
68316 end = start + len;
68317@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68318 return -EINVAL;
68319 if (end == start)
68320 return 0;
68321+ if (end > TASK_SIZE)
68322+ return -EINVAL;
68323+
68324 vma = find_vma_prev(current->mm, start, &prev);
68325 if (!vma || vma->vm_start > start)
68326 return -ENOMEM;
68327@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68328 for (nstart = start ; ; ) {
68329 unsigned int newflags;
68330
68331+#ifdef CONFIG_PAX_SEGMEXEC
68332+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68333+ break;
68334+#endif
68335+
68336 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68337
68338 newflags = vma->vm_flags | VM_LOCKED;
68339@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68340 lock_limit >>= PAGE_SHIFT;
68341
68342 /* check against resource limits */
68343+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68344 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68345 error = do_mlock(start, len, 1);
68346 up_write(&current->mm->mmap_sem);
68347@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68348 static int do_mlockall(int flags)
68349 {
68350 struct vm_area_struct * vma, * prev = NULL;
68351- unsigned int def_flags = 0;
68352
68353 if (flags & MCL_FUTURE)
68354- def_flags = VM_LOCKED;
68355- current->mm->def_flags = def_flags;
68356+ current->mm->def_flags |= VM_LOCKED;
68357+ else
68358+ current->mm->def_flags &= ~VM_LOCKED;
68359 if (flags == MCL_FUTURE)
68360 goto out;
68361
68362 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68363- unsigned int newflags;
68364+ unsigned long newflags;
68365+
68366+#ifdef CONFIG_PAX_SEGMEXEC
68367+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68368+ break;
68369+#endif
68370
68371+ BUG_ON(vma->vm_end > TASK_SIZE);
68372 newflags = vma->vm_flags | VM_LOCKED;
68373 if (!(flags & MCL_CURRENT))
68374 newflags &= ~VM_LOCKED;
68375@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68376 lock_limit >>= PAGE_SHIFT;
68377
68378 ret = -ENOMEM;
68379+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68380 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68381 capable(CAP_IPC_LOCK))
68382 ret = do_mlockall(flags);
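
Editorial note on the do_mlockall() hunk above: the stock code rebuilt mm->def_flags from scratch, discarding any other default bits, while the patched version only sets or clears VM_LOCKED. The sketch below shows that flag handling on a plain flags word; it is an editorial illustration with the VM_LOCKED and MCL_* constants hard-coded to their usual x86 values.

/* sketch: mlockall() default-flags handling (illustration only) */
#include <stdio.h>

#define VM_LOCKED   0x00002000UL
#define MCL_CURRENT 1
#define MCL_FUTURE  2

static unsigned long apply_mlockall(unsigned long def_flags, int flags)
{
        if (flags & MCL_FUTURE)
                def_flags |= VM_LOCKED;         /* future mappings start out locked */
        else
                def_flags &= ~VM_LOCKED;        /* but leave every other default bit alone */
        return def_flags;
}

int main(void)
{
        unsigned long def_flags = 0x00000100UL; /* some pre-existing default bit */

        printf("after MCL_CURRENT: %#lx\n", apply_mlockall(def_flags, MCL_CURRENT));
        printf("after MCL_FUTURE:  %#lx\n", apply_mlockall(def_flags, MCL_FUTURE));
        return 0;
}
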
68383diff -urNp linux-2.6.32.45/mm/mmap.c linux-2.6.32.45/mm/mmap.c
68384--- linux-2.6.32.45/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68385+++ linux-2.6.32.45/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68386@@ -45,6 +45,16 @@
68387 #define arch_rebalance_pgtables(addr, len) (addr)
68388 #endif
68389
68390+static inline void verify_mm_writelocked(struct mm_struct *mm)
68391+{
68392+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68393+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68394+ up_read(&mm->mmap_sem);
68395+ BUG();
68396+ }
68397+#endif
68398+}
68399+
68400 static void unmap_region(struct mm_struct *mm,
68401 struct vm_area_struct *vma, struct vm_area_struct *prev,
68402 unsigned long start, unsigned long end);
68403@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68404 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68405 *
68406 */
68407-pgprot_t protection_map[16] = {
68408+pgprot_t protection_map[16] __read_only = {
68409 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68410 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68411 };
68412
68413 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68414 {
68415- return __pgprot(pgprot_val(protection_map[vm_flags &
68416+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68417 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68418 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68419+
68420+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68421+ if (!nx_enabled &&
68422+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68423+ (vm_flags & (VM_READ | VM_WRITE)))
68424+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68425+#endif
68426+
68427+ return prot;
68428 }
68429 EXPORT_SYMBOL(vm_get_page_prot);
68430
68431 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68432 int sysctl_overcommit_ratio = 50; /* default is 50% */
68433 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68434+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68435 struct percpu_counter vm_committed_as;
68436
68437 /*
68438@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68439 struct vm_area_struct *next = vma->vm_next;
68440
68441 might_sleep();
68442+ BUG_ON(vma->vm_mirror);
68443 if (vma->vm_ops && vma->vm_ops->close)
68444 vma->vm_ops->close(vma);
68445 if (vma->vm_file) {
68446@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68447 * not page aligned -Ram Gupta
68448 */
68449 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68450+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68451 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68452 (mm->end_data - mm->start_data) > rlim)
68453 goto out;
68454@@ -704,6 +726,12 @@ static int
68455 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68456 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68457 {
68458+
68459+#ifdef CONFIG_PAX_SEGMEXEC
68460+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68461+ return 0;
68462+#endif
68463+
68464 if (is_mergeable_vma(vma, file, vm_flags) &&
68465 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68466 if (vma->vm_pgoff == vm_pgoff)
68467@@ -723,6 +751,12 @@ static int
68468 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68469 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68470 {
68471+
68472+#ifdef CONFIG_PAX_SEGMEXEC
68473+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68474+ return 0;
68475+#endif
68476+
68477 if (is_mergeable_vma(vma, file, vm_flags) &&
68478 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68479 pgoff_t vm_pglen;
68480@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68481 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68482 struct vm_area_struct *prev, unsigned long addr,
68483 unsigned long end, unsigned long vm_flags,
68484- struct anon_vma *anon_vma, struct file *file,
68485+ struct anon_vma *anon_vma, struct file *file,
68486 pgoff_t pgoff, struct mempolicy *policy)
68487 {
68488 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68489 struct vm_area_struct *area, *next;
68490
68491+#ifdef CONFIG_PAX_SEGMEXEC
68492+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68493+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68494+
68495+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68496+#endif
68497+
68498 /*
68499 * We later require that vma->vm_flags == vm_flags,
68500 * so this tests vma->vm_flags & VM_SPECIAL, too.
68501@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68502 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68503 next = next->vm_next;
68504
68505+#ifdef CONFIG_PAX_SEGMEXEC
68506+ if (prev)
68507+ prev_m = pax_find_mirror_vma(prev);
68508+ if (area)
68509+ area_m = pax_find_mirror_vma(area);
68510+ if (next)
68511+ next_m = pax_find_mirror_vma(next);
68512+#endif
68513+
68514 /*
68515 * Can it merge with the predecessor?
68516 */
68517@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68518 /* cases 1, 6 */
68519 vma_adjust(prev, prev->vm_start,
68520 next->vm_end, prev->vm_pgoff, NULL);
68521- } else /* cases 2, 5, 7 */
68522+
68523+#ifdef CONFIG_PAX_SEGMEXEC
68524+ if (prev_m)
68525+ vma_adjust(prev_m, prev_m->vm_start,
68526+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68527+#endif
68528+
68529+ } else { /* cases 2, 5, 7 */
68530 vma_adjust(prev, prev->vm_start,
68531 end, prev->vm_pgoff, NULL);
68532+
68533+#ifdef CONFIG_PAX_SEGMEXEC
68534+ if (prev_m)
68535+ vma_adjust(prev_m, prev_m->vm_start,
68536+ end_m, prev_m->vm_pgoff, NULL);
68537+#endif
68538+
68539+ }
68540 return prev;
68541 }
68542
68543@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68544 mpol_equal(policy, vma_policy(next)) &&
68545 can_vma_merge_before(next, vm_flags,
68546 anon_vma, file, pgoff+pglen)) {
68547- if (prev && addr < prev->vm_end) /* case 4 */
68548+ if (prev && addr < prev->vm_end) { /* case 4 */
68549 vma_adjust(prev, prev->vm_start,
68550 addr, prev->vm_pgoff, NULL);
68551- else /* cases 3, 8 */
68552+
68553+#ifdef CONFIG_PAX_SEGMEXEC
68554+ if (prev_m)
68555+ vma_adjust(prev_m, prev_m->vm_start,
68556+ addr_m, prev_m->vm_pgoff, NULL);
68557+#endif
68558+
68559+ } else { /* cases 3, 8 */
68560 vma_adjust(area, addr, next->vm_end,
68561 next->vm_pgoff - pglen, NULL);
68562+
68563+#ifdef CONFIG_PAX_SEGMEXEC
68564+ if (area_m)
68565+ vma_adjust(area_m, addr_m, next_m->vm_end,
68566+ next_m->vm_pgoff - pglen, NULL);
68567+#endif
68568+
68569+ }
68570 return area;
68571 }
68572
68573@@ -898,14 +978,11 @@ none:
68574 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68575 struct file *file, long pages)
68576 {
68577- const unsigned long stack_flags
68578- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68579-
68580 if (file) {
68581 mm->shared_vm += pages;
68582 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68583 mm->exec_vm += pages;
68584- } else if (flags & stack_flags)
68585+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68586 mm->stack_vm += pages;
68587 if (flags & (VM_RESERVED|VM_IO))
68588 mm->reserved_vm += pages;
68589@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68590 * (the exception is when the underlying filesystem is noexec
68591 * mounted, in which case we dont add PROT_EXEC.)
68592 */
68593- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68594+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68595 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68596 prot |= PROT_EXEC;
68597
68598@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68599 /* Obtain the address to map to. we verify (or select) it and ensure
68600 * that it represents a valid section of the address space.
68601 */
68602- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68603+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68604 if (addr & ~PAGE_MASK)
68605 return addr;
68606
68607@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68608 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68609 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68610
68611+#ifdef CONFIG_PAX_MPROTECT
68612+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68613+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68614+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68615+ gr_log_rwxmmap(file);
68616+
68617+#ifdef CONFIG_PAX_EMUPLT
68618+ vm_flags &= ~VM_EXEC;
68619+#else
68620+ return -EPERM;
68621+#endif
68622+
68623+ }
68624+
68625+ if (!(vm_flags & VM_EXEC))
68626+ vm_flags &= ~VM_MAYEXEC;
68627+#else
68628+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68629+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68630+#endif
68631+ else
68632+ vm_flags &= ~VM_MAYWRITE;
68633+ }
68634+#endif
68635+
68636+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68637+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68638+ vm_flags &= ~VM_PAGEEXEC;
68639+#endif
68640+
68641 if (flags & MAP_LOCKED)
68642 if (!can_do_mlock())
68643 return -EPERM;
68644@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68645 locked += mm->locked_vm;
68646 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68647 lock_limit >>= PAGE_SHIFT;
68648+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68649 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68650 return -EAGAIN;
68651 }
68652@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68653 if (error)
68654 return error;
68655
68656+ if (!gr_acl_handle_mmap(file, prot))
68657+ return -EACCES;
68658+
68659 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68660 }
68661 EXPORT_SYMBOL(do_mmap_pgoff);
68662@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
68663 */
68664 int vma_wants_writenotify(struct vm_area_struct *vma)
68665 {
68666- unsigned int vm_flags = vma->vm_flags;
68667+ unsigned long vm_flags = vma->vm_flags;
68668
68669 /* If it was private or non-writable, the write bit is already clear */
68670- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68671+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68672 return 0;
68673
68674 /* The backer wishes to know when pages are first written to? */
68675@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
68676 unsigned long charged = 0;
68677 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68678
68679+#ifdef CONFIG_PAX_SEGMEXEC
68680+ struct vm_area_struct *vma_m = NULL;
68681+#endif
68682+
68683+ /*
68684+ * mm->mmap_sem is required to protect against another thread
68685+ * changing the mappings in case we sleep.
68686+ */
68687+ verify_mm_writelocked(mm);
68688+
68689 /* Clear old maps */
68690 error = -ENOMEM;
68691-munmap_back:
68692 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68693 if (vma && vma->vm_start < addr + len) {
68694 if (do_munmap(mm, addr, len))
68695 return -ENOMEM;
68696- goto munmap_back;
68697+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68698+ BUG_ON(vma && vma->vm_start < addr + len);
68699 }
68700
68701 /* Check against address space limit. */
68702@@ -1173,6 +1294,16 @@ munmap_back:
68703 goto unacct_error;
68704 }
68705
68706+#ifdef CONFIG_PAX_SEGMEXEC
68707+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68708+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68709+ if (!vma_m) {
68710+ error = -ENOMEM;
68711+ goto free_vma;
68712+ }
68713+ }
68714+#endif
68715+
68716 vma->vm_mm = mm;
68717 vma->vm_start = addr;
68718 vma->vm_end = addr + len;
68719@@ -1195,6 +1326,19 @@ munmap_back:
68720 error = file->f_op->mmap(file, vma);
68721 if (error)
68722 goto unmap_and_free_vma;
68723+
68724+#ifdef CONFIG_PAX_SEGMEXEC
68725+ if (vma_m && (vm_flags & VM_EXECUTABLE))
68726+ added_exe_file_vma(mm);
68727+#endif
68728+
68729+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68730+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68731+ vma->vm_flags |= VM_PAGEEXEC;
68732+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68733+ }
68734+#endif
68735+
68736 if (vm_flags & VM_EXECUTABLE)
68737 added_exe_file_vma(mm);
68738
68739@@ -1218,6 +1362,11 @@ munmap_back:
68740 vma_link(mm, vma, prev, rb_link, rb_parent);
68741 file = vma->vm_file;
68742
68743+#ifdef CONFIG_PAX_SEGMEXEC
68744+ if (vma_m)
68745+ pax_mirror_vma(vma_m, vma);
68746+#endif
68747+
68748 /* Once vma denies write, undo our temporary denial count */
68749 if (correct_wcount)
68750 atomic_inc(&inode->i_writecount);
68751@@ -1226,6 +1375,7 @@ out:
68752
68753 mm->total_vm += len >> PAGE_SHIFT;
68754 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68755+ track_exec_limit(mm, addr, addr + len, vm_flags);
68756 if (vm_flags & VM_LOCKED) {
68757 /*
68758 * makes pages present; downgrades, drops, reacquires mmap_sem
68759@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
68760 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68761 charged = 0;
68762 free_vma:
68763+
68764+#ifdef CONFIG_PAX_SEGMEXEC
68765+ if (vma_m)
68766+ kmem_cache_free(vm_area_cachep, vma_m);
68767+#endif
68768+
68769 kmem_cache_free(vm_area_cachep, vma);
68770 unacct_error:
68771 if (charged)
68772@@ -1255,6 +1411,44 @@ unacct_error:
68773 return error;
68774 }
68775
68776+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
68777+{
68778+ if (!vma) {
68779+#ifdef CONFIG_STACK_GROWSUP
68780+ if (addr > sysctl_heap_stack_gap)
68781+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
68782+ else
68783+ vma = find_vma(current->mm, 0);
68784+ if (vma && (vma->vm_flags & VM_GROWSUP))
68785+ return false;
68786+#endif
68787+ return true;
68788+ }
68789+
68790+ if (addr + len > vma->vm_start)
68791+ return false;
68792+
68793+ if (vma->vm_flags & VM_GROWSDOWN)
68794+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
68795+#ifdef CONFIG_STACK_GROWSUP
68796+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
68797+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
68798+#endif
68799+
68800+ return true;
68801+}
68802+
68803+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
68804+{
68805+ if (vma->vm_start < len)
68806+ return -ENOMEM;
68807+ if (!(vma->vm_flags & VM_GROWSDOWN))
68808+ return vma->vm_start - len;
68809+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
68810+ return vma->vm_start - len - sysctl_heap_stack_gap;
68811+ return -ENOMEM;
68812+}
68813+
68814 /* Get an address range which is currently unmapped.
68815 * For shmat() with addr=0.
68816 *
68817@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
68818 if (flags & MAP_FIXED)
68819 return addr;
68820
68821+#ifdef CONFIG_PAX_RANDMMAP
68822+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68823+#endif
68824+
68825 if (addr) {
68826 addr = PAGE_ALIGN(addr);
68827- vma = find_vma(mm, addr);
68828- if (TASK_SIZE - len >= addr &&
68829- (!vma || addr + len <= vma->vm_start))
68830- return addr;
68831+ if (TASK_SIZE - len >= addr) {
68832+ vma = find_vma(mm, addr);
68833+ if (check_heap_stack_gap(vma, addr, len))
68834+ return addr;
68835+ }
68836 }
68837 if (len > mm->cached_hole_size) {
68838- start_addr = addr = mm->free_area_cache;
68839+ start_addr = addr = mm->free_area_cache;
68840 } else {
68841- start_addr = addr = TASK_UNMAPPED_BASE;
68842- mm->cached_hole_size = 0;
68843+ start_addr = addr = mm->mmap_base;
68844+ mm->cached_hole_size = 0;
68845 }
68846
68847 full_search:
68848@@ -1303,34 +1502,40 @@ full_search:
68849 * Start a new search - just in case we missed
68850 * some holes.
68851 */
68852- if (start_addr != TASK_UNMAPPED_BASE) {
68853- addr = TASK_UNMAPPED_BASE;
68854- start_addr = addr;
68855+ if (start_addr != mm->mmap_base) {
68856+ start_addr = addr = mm->mmap_base;
68857 mm->cached_hole_size = 0;
68858 goto full_search;
68859 }
68860 return -ENOMEM;
68861 }
68862- if (!vma || addr + len <= vma->vm_start) {
68863- /*
68864- * Remember the place where we stopped the search:
68865- */
68866- mm->free_area_cache = addr + len;
68867- return addr;
68868- }
68869+ if (check_heap_stack_gap(vma, addr, len))
68870+ break;
68871 if (addr + mm->cached_hole_size < vma->vm_start)
68872 mm->cached_hole_size = vma->vm_start - addr;
68873 addr = vma->vm_end;
68874 }
68875+
68876+ /*
68877+ * Remember the place where we stopped the search:
68878+ */
68879+ mm->free_area_cache = addr + len;
68880+ return addr;
68881 }
68882 #endif
68883
68884 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
68885 {
68886+
68887+#ifdef CONFIG_PAX_SEGMEXEC
68888+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68889+ return;
68890+#endif
68891+
68892 /*
68893 * Is this a new hole at the lowest possible address?
68894 */
68895- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
68896+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
68897 mm->free_area_cache = addr;
68898 mm->cached_hole_size = ~0UL;
68899 }
68900@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
68901 {
68902 struct vm_area_struct *vma;
68903 struct mm_struct *mm = current->mm;
68904- unsigned long addr = addr0;
68905+ unsigned long base = mm->mmap_base, addr = addr0;
68906
68907 /* requested length too big for entire address space */
68908 if (len > TASK_SIZE)
68909@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
68910 if (flags & MAP_FIXED)
68911 return addr;
68912
68913+#ifdef CONFIG_PAX_RANDMMAP
68914+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
68915+#endif
68916+
68917 /* requesting a specific address */
68918 if (addr) {
68919 addr = PAGE_ALIGN(addr);
68920- vma = find_vma(mm, addr);
68921- if (TASK_SIZE - len >= addr &&
68922- (!vma || addr + len <= vma->vm_start))
68923- return addr;
68924+ if (TASK_SIZE - len >= addr) {
68925+ vma = find_vma(mm, addr);
68926+ if (check_heap_stack_gap(vma, addr, len))
68927+ return addr;
68928+ }
68929 }
68930
68931 /* check if free_area_cache is useful for us */
68932@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
68933 /* make sure it can fit in the remaining address space */
68934 if (addr > len) {
68935 vma = find_vma(mm, addr-len);
68936- if (!vma || addr <= vma->vm_start)
68937+ if (check_heap_stack_gap(vma, addr - len, len))
68938 /* remember the address as a hint for next time */
68939 return (mm->free_area_cache = addr-len);
68940 }
68941@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
68942 * return with success:
68943 */
68944 vma = find_vma(mm, addr);
68945- if (!vma || addr+len <= vma->vm_start)
68946+ if (check_heap_stack_gap(vma, addr, len))
68947 /* remember the address as a hint for next time */
68948 return (mm->free_area_cache = addr);
68949
68950@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
68951 mm->cached_hole_size = vma->vm_start - addr;
68952
68953 /* try just below the current vma->vm_start */
68954- addr = vma->vm_start-len;
68955- } while (len < vma->vm_start);
68956+ addr = skip_heap_stack_gap(vma, len);
68957+ } while (!IS_ERR_VALUE(addr));
68958
68959 bottomup:
68960 /*
68961@@ -1414,13 +1624,21 @@ bottomup:
68962 * can happen with large stack limits and large mmap()
68963 * allocations.
68964 */
68965+ mm->mmap_base = TASK_UNMAPPED_BASE;
68966+
68967+#ifdef CONFIG_PAX_RANDMMAP
68968+ if (mm->pax_flags & MF_PAX_RANDMMAP)
68969+ mm->mmap_base += mm->delta_mmap;
68970+#endif
68971+
68972+ mm->free_area_cache = mm->mmap_base;
68973 mm->cached_hole_size = ~0UL;
68974- mm->free_area_cache = TASK_UNMAPPED_BASE;
68975 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
68976 /*
68977 * Restore the topdown base:
68978 */
68979- mm->free_area_cache = mm->mmap_base;
68980+ mm->mmap_base = base;
68981+ mm->free_area_cache = base;
68982 mm->cached_hole_size = ~0UL;
68983
68984 return addr;
68985@@ -1429,6 +1647,12 @@ bottomup:
68986
68987 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
68988 {
68989+
68990+#ifdef CONFIG_PAX_SEGMEXEC
68991+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
68992+ return;
68993+#endif
68994+
68995 /*
68996 * Is this a new hole at the highest possible address?
68997 */
68998@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
68999 mm->free_area_cache = addr;
69000
69001 /* dont allow allocations above current base */
69002- if (mm->free_area_cache > mm->mmap_base)
69003+ if (mm->free_area_cache > mm->mmap_base) {
69004 mm->free_area_cache = mm->mmap_base;
69005+ mm->cached_hole_size = ~0UL;
69006+ }
69007 }
69008
69009 unsigned long
69010@@ -1545,6 +1771,27 @@ out:
69011 return prev ? prev->vm_next : vma;
69012 }
69013
69014+#ifdef CONFIG_PAX_SEGMEXEC
69015+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69016+{
69017+ struct vm_area_struct *vma_m;
69018+
69019+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69020+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69021+ BUG_ON(vma->vm_mirror);
69022+ return NULL;
69023+ }
69024+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69025+ vma_m = vma->vm_mirror;
69026+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69027+ BUG_ON(vma->vm_file != vma_m->vm_file);
69028+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69029+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
69030+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69031+ return vma_m;
69032+}
69033+#endif
69034+
69035 /*
69036 * Verify that the stack growth is acceptable and
69037 * update accounting. This is shared with both the
69038@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
69039 return -ENOMEM;
69040
69041 /* Stack limit test */
69042+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69043 if (size > rlim[RLIMIT_STACK].rlim_cur)
69044 return -ENOMEM;
69045
69046@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
69047 unsigned long limit;
69048 locked = mm->locked_vm + grow;
69049 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
69050+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69051 if (locked > limit && !capable(CAP_IPC_LOCK))
69052 return -ENOMEM;
69053 }
69054@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
69055 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69056 * vma is the last one with address > vma->vm_end. Have to extend vma.
69057 */
69058+#ifndef CONFIG_IA64
69059+static
69060+#endif
69061 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69062 {
69063 int error;
69064+ bool locknext;
69065
69066 if (!(vma->vm_flags & VM_GROWSUP))
69067 return -EFAULT;
69068
69069+ /* Also guard against wrapping around to address 0. */
69070+ if (address < PAGE_ALIGN(address+1))
69071+ address = PAGE_ALIGN(address+1);
69072+ else
69073+ return -ENOMEM;
69074+
69075 /*
69076 * We must make sure the anon_vma is allocated
69077 * so that the anon_vma locking is not a noop.
69078 */
69079 if (unlikely(anon_vma_prepare(vma)))
69080 return -ENOMEM;
69081+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69082+ if (locknext && anon_vma_prepare(vma->vm_next))
69083+ return -ENOMEM;
69084 anon_vma_lock(vma);
69085+ if (locknext)
69086+ anon_vma_lock(vma->vm_next);
69087
69088 /*
69089 * vma->vm_start/vm_end cannot change under us because the caller
69090 * is required to hold the mmap_sem in read mode. We need the
69091- * anon_vma lock to serialize against concurrent expand_stacks.
69092- * Also guard against wrapping around to address 0.
69093+ * anon_vma locks to serialize against concurrent expand_stacks
69094+ * and expand_upwards.
69095 */
69096- if (address < PAGE_ALIGN(address+4))
69097- address = PAGE_ALIGN(address+4);
69098- else {
69099- anon_vma_unlock(vma);
69100- return -ENOMEM;
69101- }
69102 error = 0;
69103
69104 /* Somebody else might have raced and expanded it already */
69105- if (address > vma->vm_end) {
69106+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69107+ error = -ENOMEM;
69108+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69109 unsigned long size, grow;
69110
69111 size = address - vma->vm_start;
69112@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
69113 if (!error)
69114 vma->vm_end = address;
69115 }
69116+ if (locknext)
69117+ anon_vma_unlock(vma->vm_next);
69118 anon_vma_unlock(vma);
69119 return error;
69120 }
69121@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
69122 unsigned long address)
69123 {
69124 int error;
69125+ bool lockprev = false;
69126+ struct vm_area_struct *prev;
69127
69128 /*
69129 * We must make sure the anon_vma is allocated
69130@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
69131 if (error)
69132 return error;
69133
69134+ prev = vma->vm_prev;
69135+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69136+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69137+#endif
69138+ if (lockprev && anon_vma_prepare(prev))
69139+ return -ENOMEM;
69140+ if (lockprev)
69141+ anon_vma_lock(prev);
69142+
69143 anon_vma_lock(vma);
69144
69145 /*
69146@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
69147 */
69148
69149 /* Somebody else might have raced and expanded it already */
69150- if (address < vma->vm_start) {
69151+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69152+ error = -ENOMEM;
69153+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69154 unsigned long size, grow;
69155
69156+#ifdef CONFIG_PAX_SEGMEXEC
69157+ struct vm_area_struct *vma_m;
69158+
69159+ vma_m = pax_find_mirror_vma(vma);
69160+#endif
69161+
69162 size = vma->vm_end - address;
69163 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69164
69165@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
69166 if (!error) {
69167 vma->vm_start = address;
69168 vma->vm_pgoff -= grow;
69169+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69170+
69171+#ifdef CONFIG_PAX_SEGMEXEC
69172+ if (vma_m) {
69173+ vma_m->vm_start -= grow << PAGE_SHIFT;
69174+ vma_m->vm_pgoff -= grow;
69175+ }
69176+#endif
69177+
69178 }
69179 }
69180 anon_vma_unlock(vma);
69181+ if (lockprev)
69182+ anon_vma_unlock(prev);
69183 return error;
69184 }
69185
69186@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
69187 do {
69188 long nrpages = vma_pages(vma);
69189
69190+#ifdef CONFIG_PAX_SEGMEXEC
69191+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69192+ vma = remove_vma(vma);
69193+ continue;
69194+ }
69195+#endif
69196+
69197 mm->total_vm -= nrpages;
69198 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69199 vma = remove_vma(vma);
69200@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69201 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69202 vma->vm_prev = NULL;
69203 do {
69204+
69205+#ifdef CONFIG_PAX_SEGMEXEC
69206+ if (vma->vm_mirror) {
69207+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69208+ vma->vm_mirror->vm_mirror = NULL;
69209+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69210+ vma->vm_mirror = NULL;
69211+ }
69212+#endif
69213+
69214 rb_erase(&vma->vm_rb, &mm->mm_rb);
69215 mm->map_count--;
69216 tail_vma = vma;
69217@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
69218 struct mempolicy *pol;
69219 struct vm_area_struct *new;
69220
69221+#ifdef CONFIG_PAX_SEGMEXEC
69222+ struct vm_area_struct *vma_m, *new_m = NULL;
69223+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69224+#endif
69225+
69226 if (is_vm_hugetlb_page(vma) && (addr &
69227 ~(huge_page_mask(hstate_vma(vma)))))
69228 return -EINVAL;
69229
69230+#ifdef CONFIG_PAX_SEGMEXEC
69231+ vma_m = pax_find_mirror_vma(vma);
69232+
69233+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69234+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69235+ if (mm->map_count >= sysctl_max_map_count-1)
69236+ return -ENOMEM;
69237+ } else
69238+#endif
69239+
69240 if (mm->map_count >= sysctl_max_map_count)
69241 return -ENOMEM;
69242
69243@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
69244 if (!new)
69245 return -ENOMEM;
69246
69247+#ifdef CONFIG_PAX_SEGMEXEC
69248+ if (vma_m) {
69249+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69250+ if (!new_m) {
69251+ kmem_cache_free(vm_area_cachep, new);
69252+ return -ENOMEM;
69253+ }
69254+ }
69255+#endif
69256+
69257 /* most fields are the same, copy all, and then fixup */
69258 *new = *vma;
69259
69260@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
69261 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69262 }
69263
69264+#ifdef CONFIG_PAX_SEGMEXEC
69265+ if (vma_m) {
69266+ *new_m = *vma_m;
69267+ new_m->vm_mirror = new;
69268+ new->vm_mirror = new_m;
69269+
69270+ if (new_below)
69271+ new_m->vm_end = addr_m;
69272+ else {
69273+ new_m->vm_start = addr_m;
69274+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69275+ }
69276+ }
69277+#endif
69278+
69279 pol = mpol_dup(vma_policy(vma));
69280 if (IS_ERR(pol)) {
69281+
69282+#ifdef CONFIG_PAX_SEGMEXEC
69283+ if (new_m)
69284+ kmem_cache_free(vm_area_cachep, new_m);
69285+#endif
69286+
69287 kmem_cache_free(vm_area_cachep, new);
69288 return PTR_ERR(pol);
69289 }
69290@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69291 else
69292 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69293
69294+#ifdef CONFIG_PAX_SEGMEXEC
69295+ if (vma_m) {
69296+ mpol_get(pol);
69297+ vma_set_policy(new_m, pol);
69298+
69299+ if (new_m->vm_file) {
69300+ get_file(new_m->vm_file);
69301+ if (vma_m->vm_flags & VM_EXECUTABLE)
69302+ added_exe_file_vma(mm);
69303+ }
69304+
69305+ if (new_m->vm_ops && new_m->vm_ops->open)
69306+ new_m->vm_ops->open(new_m);
69307+
69308+ if (new_below)
69309+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69310+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69311+ else
69312+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69313+ }
69314+#endif
69315+
69316 return 0;
69317 }
69318
69319@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69320 * work. This now handles partial unmappings.
69321 * Jeremy Fitzhardinge <jeremy@goop.org>
69322 */
69323+#ifdef CONFIG_PAX_SEGMEXEC
69324+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69325+{
69326+ int ret = __do_munmap(mm, start, len);
69327+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69328+ return ret;
69329+
69330+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69331+}
69332+
69333+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69334+#else
69335 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69336+#endif
69337 {
69338 unsigned long end;
69339 struct vm_area_struct *vma, *prev, *last;
69340
69341+ /*
69342+ * mm->mmap_sem is required to protect against another thread
69343+ * changing the mappings in case we sleep.
69344+ */
69345+ verify_mm_writelocked(mm);
69346+
69347 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69348 return -EINVAL;
69349
69350@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69351 /* Fix up all other VM information */
69352 remove_vma_list(mm, vma);
69353
69354+ track_exec_limit(mm, start, end, 0UL);
69355+
69356 return 0;
69357 }
69358
69359@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69360
69361 profile_munmap(addr);
69362
69363+#ifdef CONFIG_PAX_SEGMEXEC
69364+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69365+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69366+ return -EINVAL;
69367+#endif
69368+
69369 down_write(&mm->mmap_sem);
69370 ret = do_munmap(mm, addr, len);
69371 up_write(&mm->mmap_sem);
69372 return ret;
69373 }
69374
69375-static inline void verify_mm_writelocked(struct mm_struct *mm)
69376-{
69377-#ifdef CONFIG_DEBUG_VM
69378- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69379- WARN_ON(1);
69380- up_read(&mm->mmap_sem);
69381- }
69382-#endif
69383-}
69384-
69385 /*
69386 * this is really a simplified "do_mmap". it only handles
69387 * anonymous maps. eventually we may be able to do some
69388@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69389 struct rb_node ** rb_link, * rb_parent;
69390 pgoff_t pgoff = addr >> PAGE_SHIFT;
69391 int error;
69392+ unsigned long charged;
69393
69394 len = PAGE_ALIGN(len);
69395 if (!len)
69396@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69397
69398 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69399
69400+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69401+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69402+ flags &= ~VM_EXEC;
69403+
69404+#ifdef CONFIG_PAX_MPROTECT
69405+ if (mm->pax_flags & MF_PAX_MPROTECT)
69406+ flags &= ~VM_MAYEXEC;
69407+#endif
69408+
69409+ }
69410+#endif
69411+
69412 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69413 if (error & ~PAGE_MASK)
69414 return error;
69415
69416+ charged = len >> PAGE_SHIFT;
69417+
69418 /*
69419 * mlock MCL_FUTURE?
69420 */
69421 if (mm->def_flags & VM_LOCKED) {
69422 unsigned long locked, lock_limit;
69423- locked = len >> PAGE_SHIFT;
69424+ locked = charged;
69425 locked += mm->locked_vm;
69426 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69427 lock_limit >>= PAGE_SHIFT;
69428@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69429 /*
69430 * Clear old maps. this also does some error checking for us
69431 */
69432- munmap_back:
69433 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69434 if (vma && vma->vm_start < addr + len) {
69435 if (do_munmap(mm, addr, len))
69436 return -ENOMEM;
69437- goto munmap_back;
69438+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69439+ BUG_ON(vma && vma->vm_start < addr + len);
69440 }
69441
69442 /* Check against address space limits *after* clearing old maps... */
69443- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69444+ if (!may_expand_vm(mm, charged))
69445 return -ENOMEM;
69446
69447 if (mm->map_count > sysctl_max_map_count)
69448 return -ENOMEM;
69449
69450- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69451+ if (security_vm_enough_memory(charged))
69452 return -ENOMEM;
69453
69454 /* Can we just expand an old private anonymous mapping? */
69455@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69456 */
69457 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69458 if (!vma) {
69459- vm_unacct_memory(len >> PAGE_SHIFT);
69460+ vm_unacct_memory(charged);
69461 return -ENOMEM;
69462 }
69463
69464@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69465 vma->vm_page_prot = vm_get_page_prot(flags);
69466 vma_link(mm, vma, prev, rb_link, rb_parent);
69467 out:
69468- mm->total_vm += len >> PAGE_SHIFT;
69469+ mm->total_vm += charged;
69470 if (flags & VM_LOCKED) {
69471 if (!mlock_vma_pages_range(vma, addr, addr + len))
69472- mm->locked_vm += (len >> PAGE_SHIFT);
69473+ mm->locked_vm += charged;
69474 }
69475+ track_exec_limit(mm, addr, addr + len, flags);
69476 return addr;
69477 }
69478
69479@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69480 * Walk the list again, actually closing and freeing it,
69481 * with preemption enabled, without holding any MM locks.
69482 */
69483- while (vma)
69484+ while (vma) {
69485+ vma->vm_mirror = NULL;
69486 vma = remove_vma(vma);
69487+ }
69488
69489 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69490 }
69491@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69492 struct vm_area_struct * __vma, * prev;
69493 struct rb_node ** rb_link, * rb_parent;
69494
69495+#ifdef CONFIG_PAX_SEGMEXEC
69496+ struct vm_area_struct *vma_m = NULL;
69497+#endif
69498+
69499 /*
69500 * The vm_pgoff of a purely anonymous vma should be irrelevant
69501 * until its first write fault, when page's anon_vma and index
69502@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69503 if ((vma->vm_flags & VM_ACCOUNT) &&
69504 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69505 return -ENOMEM;
69506+
69507+#ifdef CONFIG_PAX_SEGMEXEC
69508+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69509+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69510+ if (!vma_m)
69511+ return -ENOMEM;
69512+ }
69513+#endif
69514+
69515 vma_link(mm, vma, prev, rb_link, rb_parent);
69516+
69517+#ifdef CONFIG_PAX_SEGMEXEC
69518+ if (vma_m)
69519+ pax_mirror_vma(vma_m, vma);
69520+#endif
69521+
69522 return 0;
69523 }
69524
69525@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69526 struct rb_node **rb_link, *rb_parent;
69527 struct mempolicy *pol;
69528
69529+ BUG_ON(vma->vm_mirror);
69530+
69531 /*
69532 * If anonymous vma has not yet been faulted, update new pgoff
69533 * to match new location, to increase its chance of merging.
69534@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69535 return new_vma;
69536 }
69537
69538+#ifdef CONFIG_PAX_SEGMEXEC
69539+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69540+{
69541+ struct vm_area_struct *prev_m;
69542+ struct rb_node **rb_link_m, *rb_parent_m;
69543+ struct mempolicy *pol_m;
69544+
69545+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69546+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69547+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69548+ *vma_m = *vma;
69549+ pol_m = vma_policy(vma_m);
69550+ mpol_get(pol_m);
69551+ vma_set_policy(vma_m, pol_m);
69552+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69553+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69554+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69555+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69556+ if (vma_m->vm_file)
69557+ get_file(vma_m->vm_file);
69558+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69559+ vma_m->vm_ops->open(vma_m);
69560+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69561+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69562+ vma_m->vm_mirror = vma;
69563+ vma->vm_mirror = vma_m;
69564+}
69565+#endif
69566+
69567 /*
69568 * Return true if the calling process may expand its vm space by the passed
69569 * number of pages
69570@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69571 unsigned long lim;
69572
69573 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69574-
69575+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69576 if (cur + npages > lim)
69577 return 0;
69578 return 1;
69579@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69580 vma->vm_start = addr;
69581 vma->vm_end = addr + len;
69582
69583+#ifdef CONFIG_PAX_MPROTECT
69584+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69585+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69586+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69587+ return -EPERM;
69588+ if (!(vm_flags & VM_EXEC))
69589+ vm_flags &= ~VM_MAYEXEC;
69590+#else
69591+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69592+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69593+#endif
69594+ else
69595+ vm_flags &= ~VM_MAYWRITE;
69596+ }
69597+#endif
69598+
69599 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69600 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69601
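
Editorial note on the mm/mmap.c changes above: the new check_heap_stack_gap()/skip_heap_stack_gap() pair enforces a configurable hole (sysctl_heap_stack_gap, default 64 KiB) between a candidate mapping and any adjacent stack-style VMA, and the arch_get_unmapped_area variants are switched over to it. The stand-alone sketch below re-implements only the VM_GROWSDOWN half of that logic against a simplified vma, so the rule can be exercised outside the kernel; the CONFIG_STACK_GROWSUP branches are omitted and everything here is an editorial illustration, not part of the patch.

/* sketch: heap/stack gap enforcement (illustration only, GROWSDOWN case) */
#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x00000100UL

struct vma {
        unsigned long vm_start, vm_end, vm_flags;
};

static unsigned long heap_stack_gap = 64 * 1024;   /* sysctl_heap_stack_gap default */

/* may [addr, addr+len) be placed below this vma? */
static bool check_heap_stack_gap(const struct vma *vma, unsigned long addr, unsigned long len)
{
        if (!vma)
                return true;                    /* nothing above the candidate range */
        if (addr + len > vma->vm_start)
                return false;                   /* overlaps the next mapping */
        if (vma->vm_flags & VM_GROWSDOWN)       /* keep a gap below a stack */
                return heap_stack_gap <= vma->vm_start - addr - len;
        return true;
}

/* highest address at which a mapping of 'len' bytes fits below this vma */
static unsigned long skip_heap_stack_gap(const struct vma *vma, unsigned long len)
{
        if (vma->vm_start < len)
                return (unsigned long)-12;      /* -ENOMEM, as in the kernel code */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return vma->vm_start - len;
        if (heap_stack_gap <= vma->vm_start - len)
                return vma->vm_start - len - heap_stack_gap;
        return (unsigned long)-12;
}

int main(void)
{
        struct vma stack = { 0xbff00000UL, 0xc0000000UL, VM_GROWSDOWN };

        printf("right below the stack: %s\n",
               check_heap_stack_gap(&stack, 0xbfef0000UL, 0x10000) ? "ok" : "too close");
        printf("first acceptable addr: %#lx\n", skip_heap_stack_gap(&stack, 0x10000));
        return 0;
}
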
69602diff -urNp linux-2.6.32.45/mm/mprotect.c linux-2.6.32.45/mm/mprotect.c
69603--- linux-2.6.32.45/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69604+++ linux-2.6.32.45/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69605@@ -24,10 +24,16 @@
69606 #include <linux/mmu_notifier.h>
69607 #include <linux/migrate.h>
69608 #include <linux/perf_event.h>
69609+
69610+#ifdef CONFIG_PAX_MPROTECT
69611+#include <linux/elf.h>
69612+#endif
69613+
69614 #include <asm/uaccess.h>
69615 #include <asm/pgtable.h>
69616 #include <asm/cacheflush.h>
69617 #include <asm/tlbflush.h>
69618+#include <asm/mmu_context.h>
69619
69620 #ifndef pgprot_modify
69621 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69622@@ -132,6 +138,48 @@ static void change_protection(struct vm_
69623 flush_tlb_range(vma, start, end);
69624 }
69625
69626+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69627+/* called while holding the mmap semaphore for writing, except for stack expansion */
69628+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69629+{
69630+ unsigned long oldlimit, newlimit = 0UL;
69631+
69632+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69633+ return;
69634+
69635+ spin_lock(&mm->page_table_lock);
69636+ oldlimit = mm->context.user_cs_limit;
69637+ if ((prot & VM_EXEC) && oldlimit < end)
69638+ /* USER_CS limit moved up */
69639+ newlimit = end;
69640+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69641+ /* USER_CS limit moved down */
69642+ newlimit = start;
69643+
69644+ if (newlimit) {
69645+ mm->context.user_cs_limit = newlimit;
69646+
69647+#ifdef CONFIG_SMP
69648+ wmb();
69649+ cpus_clear(mm->context.cpu_user_cs_mask);
69650+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69651+#endif
69652+
69653+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69654+ }
69655+ spin_unlock(&mm->page_table_lock);
69656+ if (newlimit == end) {
69657+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69658+
69659+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69660+ if (is_vm_hugetlb_page(vma))
69661+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69662+ else
69663+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69664+ }
69665+}
69666+#endif
69667+
69668 int
69669 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69670 unsigned long start, unsigned long end, unsigned long newflags)
69671@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
69672 int error;
69673 int dirty_accountable = 0;
69674
69675+#ifdef CONFIG_PAX_SEGMEXEC
69676+ struct vm_area_struct *vma_m = NULL;
69677+ unsigned long start_m, end_m;
69678+
69679+ start_m = start + SEGMEXEC_TASK_SIZE;
69680+ end_m = end + SEGMEXEC_TASK_SIZE;
69681+#endif
69682+
69683 if (newflags == oldflags) {
69684 *pprev = vma;
69685 return 0;
69686 }
69687
69688+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69689+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69690+
69691+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69692+ return -ENOMEM;
69693+
69694+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69695+ return -ENOMEM;
69696+ }
69697+
69698 /*
69699 * If we make a private mapping writable we increase our commit;
69700 * but (without finer accounting) cannot reduce our commit if we
69701@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
69702 }
69703 }
69704
69705+#ifdef CONFIG_PAX_SEGMEXEC
69706+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69707+ if (start != vma->vm_start) {
69708+ error = split_vma(mm, vma, start, 1);
69709+ if (error)
69710+ goto fail;
69711+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69712+ *pprev = (*pprev)->vm_next;
69713+ }
69714+
69715+ if (end != vma->vm_end) {
69716+ error = split_vma(mm, vma, end, 0);
69717+ if (error)
69718+ goto fail;
69719+ }
69720+
69721+ if (pax_find_mirror_vma(vma)) {
69722+ error = __do_munmap(mm, start_m, end_m - start_m);
69723+ if (error)
69724+ goto fail;
69725+ } else {
69726+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69727+ if (!vma_m) {
69728+ error = -ENOMEM;
69729+ goto fail;
69730+ }
69731+ vma->vm_flags = newflags;
69732+ pax_mirror_vma(vma_m, vma);
69733+ }
69734+ }
69735+#endif
69736+
69737 /*
69738 * First try to merge with previous and/or next vma.
69739 */
69740@@ -195,9 +293,21 @@ success:
69741 * vm_flags and vm_page_prot are protected by the mmap_sem
69742 * held in write mode.
69743 */
69744+
69745+#ifdef CONFIG_PAX_SEGMEXEC
69746+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
69747+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
69748+#endif
69749+
69750 vma->vm_flags = newflags;
69751+
69752+#ifdef CONFIG_PAX_MPROTECT
69753+ if (mm->binfmt && mm->binfmt->handle_mprotect)
69754+ mm->binfmt->handle_mprotect(vma, newflags);
69755+#endif
69756+
69757 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
69758- vm_get_page_prot(newflags));
69759+ vm_get_page_prot(vma->vm_flags));
69760
69761 if (vma_wants_writenotify(vma)) {
69762 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
69763@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69764 end = start + len;
69765 if (end <= start)
69766 return -ENOMEM;
69767+
69768+#ifdef CONFIG_PAX_SEGMEXEC
69769+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69770+ if (end > SEGMEXEC_TASK_SIZE)
69771+ return -EINVAL;
69772+ } else
69773+#endif
69774+
69775+ if (end > TASK_SIZE)
69776+ return -EINVAL;
69777+
69778 if (!arch_validate_prot(prot))
69779 return -EINVAL;
69780
69781@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69782 /*
69783 * Does the application expect PROT_READ to imply PROT_EXEC:
69784 */
69785- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
69786+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
69787 prot |= PROT_EXEC;
69788
69789 vm_flags = calc_vm_prot_bits(prot);
69790@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69791 if (start > vma->vm_start)
69792 prev = vma;
69793
69794+#ifdef CONFIG_PAX_MPROTECT
69795+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
69796+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
69797+#endif
69798+
69799 for (nstart = start ; ; ) {
69800 unsigned long newflags;
69801
69802@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69803
69804 /* newflags >> 4 shift VM_MAY% in place of VM_% */
69805 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
69806+ if (prot & (PROT_WRITE | PROT_EXEC))
69807+ gr_log_rwxmprotect(vma->vm_file);
69808+
69809+ error = -EACCES;
69810+ goto out;
69811+ }
69812+
69813+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
69814 error = -EACCES;
69815 goto out;
69816 }
69817@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
69818 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
69819 if (error)
69820 goto out;
69821+
69822+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
69823+
69824 nstart = tmp;
69825
69826 if (nstart < prev->vm_end)
69827diff -urNp linux-2.6.32.45/mm/mremap.c linux-2.6.32.45/mm/mremap.c
69828--- linux-2.6.32.45/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
69829+++ linux-2.6.32.45/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
69830@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
69831 continue;
69832 pte = ptep_clear_flush(vma, old_addr, old_pte);
69833 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
69834+
69835+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69836+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
69837+ pte = pte_exprotect(pte);
69838+#endif
69839+
69840 set_pte_at(mm, new_addr, new_pte, pte);
69841 }
69842
69843@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
69844 if (is_vm_hugetlb_page(vma))
69845 goto Einval;
69846
69847+#ifdef CONFIG_PAX_SEGMEXEC
69848+ if (pax_find_mirror_vma(vma))
69849+ goto Einval;
69850+#endif
69851+
69852 /* We can't remap across vm area boundaries */
69853 if (old_len > vma->vm_end - addr)
69854 goto Efault;
69855@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
69856 unsigned long ret = -EINVAL;
69857 unsigned long charged = 0;
69858 unsigned long map_flags;
69859+ unsigned long pax_task_size = TASK_SIZE;
69860
69861 if (new_addr & ~PAGE_MASK)
69862 goto out;
69863
69864- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
69865+#ifdef CONFIG_PAX_SEGMEXEC
69866+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69867+ pax_task_size = SEGMEXEC_TASK_SIZE;
69868+#endif
69869+
69870+ pax_task_size -= PAGE_SIZE;
69871+
69872+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
69873 goto out;
69874
69875 /* Check if the location we're moving into overlaps the
69876 * old location at all, and fail if it does.
69877 */
69878- if ((new_addr <= addr) && (new_addr+new_len) > addr)
69879- goto out;
69880-
69881- if ((addr <= new_addr) && (addr+old_len) > new_addr)
69882+ if (addr + old_len > new_addr && new_addr + new_len > addr)
69883 goto out;
69884
69885 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69886@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
69887 struct vm_area_struct *vma;
69888 unsigned long ret = -EINVAL;
69889 unsigned long charged = 0;
69890+ unsigned long pax_task_size = TASK_SIZE;
69891
69892 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
69893 goto out;
69894@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
69895 if (!new_len)
69896 goto out;
69897
69898+#ifdef CONFIG_PAX_SEGMEXEC
69899+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
69900+ pax_task_size = SEGMEXEC_TASK_SIZE;
69901+#endif
69902+
69903+ pax_task_size -= PAGE_SIZE;
69904+
69905+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
69906+ old_len > pax_task_size || addr > pax_task_size-old_len)
69907+ goto out;
69908+
69909 if (flags & MREMAP_FIXED) {
69910 if (flags & MREMAP_MAYMOVE)
69911 ret = mremap_to(addr, old_len, new_addr, new_len);
69912@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
69913 addr + new_len);
69914 }
69915 ret = addr;
69916+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
69917 goto out;
69918 }
69919 }
69920@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
69921 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
69922 if (ret)
69923 goto out;
69924+
69925+ map_flags = vma->vm_flags;
69926 ret = move_vma(vma, addr, old_len, new_len, new_addr);
69927+ if (!(ret & ~PAGE_MASK)) {
69928+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
69929+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
69930+ }
69931 }
69932 out:
69933 if (ret & ~PAGE_MASK)
69934diff -urNp linux-2.6.32.45/mm/nommu.c linux-2.6.32.45/mm/nommu.c
69935--- linux-2.6.32.45/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
69936+++ linux-2.6.32.45/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
69937@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
69938 int sysctl_overcommit_ratio = 50; /* default is 50% */
69939 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69940 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
69941-int heap_stack_gap = 0;
69942
69943 atomic_long_t mmap_pages_allocated;
69944
69945@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
69946 EXPORT_SYMBOL(find_vma);
69947
69948 /*
69949- * find a VMA
69950- * - we don't extend stack VMAs under NOMMU conditions
69951- */
69952-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
69953-{
69954- return find_vma(mm, addr);
69955-}
69956-
69957-/*
69958 * expand a stack to a given address
69959 * - not supported under NOMMU conditions
69960 */
69961diff -urNp linux-2.6.32.45/mm/page_alloc.c linux-2.6.32.45/mm/page_alloc.c
69962--- linux-2.6.32.45/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
69963+++ linux-2.6.32.45/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
69964@@ -289,7 +289,7 @@ out:
69965 * This usage means that zero-order pages may not be compound.
69966 */
69967
69968-static void free_compound_page(struct page *page)
69969+void free_compound_page(struct page *page)
69970 {
69971 __free_pages_ok(page, compound_order(page));
69972 }
69973@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
69974 int bad = 0;
69975 int wasMlocked = __TestClearPageMlocked(page);
69976
69977+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69978+ unsigned long index = 1UL << order;
69979+#endif
69980+
69981 kmemcheck_free_shadow(page, order);
69982
69983 for (i = 0 ; i < (1 << order) ; ++i)
69984@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
69985 debug_check_no_obj_freed(page_address(page),
69986 PAGE_SIZE << order);
69987 }
69988+
69989+#ifdef CONFIG_PAX_MEMORY_SANITIZE
69990+ for (; index; --index)
69991+ sanitize_highpage(page + index - 1);
69992+#endif
69993+
69994 arch_free_page(page, order);
69995 kernel_map_pages(page, 1 << order, 0);
69996
69997@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
69998 arch_alloc_page(page, order);
69999 kernel_map_pages(page, 1 << order, 1);
70000
70001+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70002 if (gfp_flags & __GFP_ZERO)
70003 prep_zero_page(page, order, gfp_flags);
70004+#endif
70005
70006 if (order && (gfp_flags & __GFP_COMP))
70007 prep_compound_page(page, order);
70008@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
70009 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
70010 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
70011 }
70012+
70013+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70014+ sanitize_highpage(page);
70015+#endif
70016+
70017 arch_free_page(page, 0);
70018 kernel_map_pages(page, 1, 0);
70019
70020@@ -2179,6 +2196,8 @@ void show_free_areas(void)
70021 int cpu;
70022 struct zone *zone;
70023
70024+ pax_track_stack();
70025+
70026 for_each_populated_zone(zone) {
70027 show_node(zone);
70028 printk("%s per-cpu:\n", zone->name);
70029@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
70030 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
70031 }
70032 #else
70033-static void inline setup_usemap(struct pglist_data *pgdat,
70034+static inline void setup_usemap(struct pglist_data *pgdat,
70035 struct zone *zone, unsigned long zonesize) {}
70036 #endif /* CONFIG_SPARSEMEM */
70037
70038diff -urNp linux-2.6.32.45/mm/percpu.c linux-2.6.32.45/mm/percpu.c
70039--- linux-2.6.32.45/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
70040+++ linux-2.6.32.45/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
70041@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
70042 static unsigned int pcpu_last_unit_cpu __read_mostly;
70043
70044 /* the address of the first chunk which starts with the kernel static area */
70045-void *pcpu_base_addr __read_mostly;
70046+void *pcpu_base_addr __read_only;
70047 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70048
70049 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70050diff -urNp linux-2.6.32.45/mm/rmap.c linux-2.6.32.45/mm/rmap.c
70051--- linux-2.6.32.45/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
70052+++ linux-2.6.32.45/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
70053@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
70054 /* page_table_lock to protect against threads */
70055 spin_lock(&mm->page_table_lock);
70056 if (likely(!vma->anon_vma)) {
70057+
70058+#ifdef CONFIG_PAX_SEGMEXEC
70059+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70060+
70061+ if (vma_m) {
70062+ BUG_ON(vma_m->anon_vma);
70063+ vma_m->anon_vma = anon_vma;
70064+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
70065+ }
70066+#endif
70067+
70068 vma->anon_vma = anon_vma;
70069 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
70070 allocated = NULL;
70071diff -urNp linux-2.6.32.45/mm/shmem.c linux-2.6.32.45/mm/shmem.c
70072--- linux-2.6.32.45/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
70073+++ linux-2.6.32.45/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
70074@@ -31,7 +31,7 @@
70075 #include <linux/swap.h>
70076 #include <linux/ima.h>
70077
70078-static struct vfsmount *shm_mnt;
70079+struct vfsmount *shm_mnt;
70080
70081 #ifdef CONFIG_SHMEM
70082 /*
70083@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
70084 goto unlock;
70085 }
70086 entry = shmem_swp_entry(info, index, NULL);
70087+ if (!entry)
70088+ goto unlock;
70089 if (entry->val) {
70090 /*
70091 * The more uptodate page coming down from a stacked
70092@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
70093 struct vm_area_struct pvma;
70094 struct page *page;
70095
70096+ pax_track_stack();
70097+
70098 spol = mpol_cond_copy(&mpol,
70099 mpol_shared_policy_lookup(&info->policy, idx));
70100
70101@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
70102
70103 info = SHMEM_I(inode);
70104 inode->i_size = len-1;
70105- if (len <= (char *)inode - (char *)info) {
70106+ if (len <= (char *)inode - (char *)info && len <= 64) {
70107 /* do it inline */
70108 memcpy(info, symname, len);
70109 inode->i_op = &shmem_symlink_inline_operations;
70110@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
70111 int err = -ENOMEM;
70112
70113 /* Round up to L1_CACHE_BYTES to resist false sharing */
70114- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70115- L1_CACHE_BYTES), GFP_KERNEL);
70116+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70117 if (!sbinfo)
70118 return -ENOMEM;
70119
70120diff -urNp linux-2.6.32.45/mm/slab.c linux-2.6.32.45/mm/slab.c
70121--- linux-2.6.32.45/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
70122+++ linux-2.6.32.45/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
70123@@ -174,7 +174,7 @@
70124
70125 /* Legal flag mask for kmem_cache_create(). */
70126 #if DEBUG
70127-# define CREATE_MASK (SLAB_RED_ZONE | \
70128+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70129 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70130 SLAB_CACHE_DMA | \
70131 SLAB_STORE_USER | \
70132@@ -182,7 +182,7 @@
70133 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70134 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70135 #else
70136-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70137+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70138 SLAB_CACHE_DMA | \
70139 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70140 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70141@@ -308,7 +308,7 @@ struct kmem_list3 {
70142 * Need this for bootstrapping a per node allocator.
70143 */
70144 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70145-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70146+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70147 #define CACHE_CACHE 0
70148 #define SIZE_AC MAX_NUMNODES
70149 #define SIZE_L3 (2 * MAX_NUMNODES)
70150@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
70151 if ((x)->max_freeable < i) \
70152 (x)->max_freeable = i; \
70153 } while (0)
70154-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70155-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70156-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70157-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70158+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70159+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70160+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70161+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70162 #else
70163 #define STATS_INC_ACTIVE(x) do { } while (0)
70164 #define STATS_DEC_ACTIVE(x) do { } while (0)
70165@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
70166 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70167 */
70168 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70169- const struct slab *slab, void *obj)
70170+ const struct slab *slab, const void *obj)
70171 {
70172 u32 offset = (obj - slab->s_mem);
70173 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70174@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
70175 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70176 sizes[INDEX_AC].cs_size,
70177 ARCH_KMALLOC_MINALIGN,
70178- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70179+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70180 NULL);
70181
70182 if (INDEX_AC != INDEX_L3) {
70183@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
70184 kmem_cache_create(names[INDEX_L3].name,
70185 sizes[INDEX_L3].cs_size,
70186 ARCH_KMALLOC_MINALIGN,
70187- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70188+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70189 NULL);
70190 }
70191
70192@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
70193 sizes->cs_cachep = kmem_cache_create(names->name,
70194 sizes->cs_size,
70195 ARCH_KMALLOC_MINALIGN,
70196- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70197+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70198 NULL);
70199 }
70200 #ifdef CONFIG_ZONE_DMA
70201@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
70202 }
70203 /* cpu stats */
70204 {
70205- unsigned long allochit = atomic_read(&cachep->allochit);
70206- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70207- unsigned long freehit = atomic_read(&cachep->freehit);
70208- unsigned long freemiss = atomic_read(&cachep->freemiss);
70209+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70210+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70211+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70212+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70213
70214 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70215 allochit, allocmiss, freehit, freemiss);
70216@@ -4471,15 +4471,66 @@ static const struct file_operations proc
70217
70218 static int __init slab_proc_init(void)
70219 {
70220- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70221+ mode_t gr_mode = S_IRUGO;
70222+
70223+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70224+ gr_mode = S_IRUSR;
70225+#endif
70226+
70227+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70228 #ifdef CONFIG_DEBUG_SLAB_LEAK
70229- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70230+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70231 #endif
70232 return 0;
70233 }
70234 module_init(slab_proc_init);
70235 #endif
70236
70237+void check_object_size(const void *ptr, unsigned long n, bool to)
70238+{
70239+
70240+#ifdef CONFIG_PAX_USERCOPY
70241+ struct page *page;
70242+ struct kmem_cache *cachep = NULL;
70243+ struct slab *slabp;
70244+ unsigned int objnr;
70245+ unsigned long offset;
70246+
70247+ if (!n)
70248+ return;
70249+
70250+ if (ZERO_OR_NULL_PTR(ptr))
70251+ goto report;
70252+
70253+ if (!virt_addr_valid(ptr))
70254+ return;
70255+
70256+ page = virt_to_head_page(ptr);
70257+
70258+ if (!PageSlab(page)) {
70259+ if (object_is_on_stack(ptr, n) == -1)
70260+ goto report;
70261+ return;
70262+ }
70263+
70264+ cachep = page_get_cache(page);
70265+ if (!(cachep->flags & SLAB_USERCOPY))
70266+ goto report;
70267+
70268+ slabp = page_get_slab(page);
70269+ objnr = obj_to_index(cachep, slabp, ptr);
70270+ BUG_ON(objnr >= cachep->num);
70271+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70272+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70273+ return;
70274+
70275+report:
70276+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70277+#endif
70278+
70279+}
70280+EXPORT_SYMBOL(check_object_size);
70281+
70282 /**
70283 * ksize - get the actual amount of memory allocated for a given object
70284 * @objp: Pointer to the object
70285diff -urNp linux-2.6.32.45/mm/slob.c linux-2.6.32.45/mm/slob.c
70286--- linux-2.6.32.45/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70287+++ linux-2.6.32.45/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70288@@ -29,7 +29,7 @@
70289 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70290 * alloc_pages() directly, allocating compound pages so the page order
70291 * does not have to be separately tracked, and also stores the exact
70292- * allocation size in page->private so that it can be used to accurately
70293+ * allocation size in slob_page->size so that it can be used to accurately
70294 * provide ksize(). These objects are detected in kfree() because slob_page()
70295 * is false for them.
70296 *
70297@@ -58,6 +58,7 @@
70298 */
70299
70300 #include <linux/kernel.h>
70301+#include <linux/sched.h>
70302 #include <linux/slab.h>
70303 #include <linux/mm.h>
70304 #include <linux/swap.h> /* struct reclaim_state */
70305@@ -100,7 +101,8 @@ struct slob_page {
70306 unsigned long flags; /* mandatory */
70307 atomic_t _count; /* mandatory */
70308 slobidx_t units; /* free units left in page */
70309- unsigned long pad[2];
70310+ unsigned long pad[1];
70311+ unsigned long size; /* size when >=PAGE_SIZE */
70312 slob_t *free; /* first free slob_t in page */
70313 struct list_head list; /* linked list of free pages */
70314 };
70315@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70316 */
70317 static inline int is_slob_page(struct slob_page *sp)
70318 {
70319- return PageSlab((struct page *)sp);
70320+ return PageSlab((struct page *)sp) && !sp->size;
70321 }
70322
70323 static inline void set_slob_page(struct slob_page *sp)
70324@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70325
70326 static inline struct slob_page *slob_page(const void *addr)
70327 {
70328- return (struct slob_page *)virt_to_page(addr);
70329+ return (struct slob_page *)virt_to_head_page(addr);
70330 }
70331
70332 /*
70333@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70334 /*
70335 * Return the size of a slob block.
70336 */
70337-static slobidx_t slob_units(slob_t *s)
70338+static slobidx_t slob_units(const slob_t *s)
70339 {
70340 if (s->units > 0)
70341 return s->units;
70342@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70343 /*
70344 * Return the next free slob block pointer after this one.
70345 */
70346-static slob_t *slob_next(slob_t *s)
70347+static slob_t *slob_next(const slob_t *s)
70348 {
70349 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70350 slobidx_t next;
70351@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70352 /*
70353 * Returns true if s is the last free block in its page.
70354 */
70355-static int slob_last(slob_t *s)
70356+static int slob_last(const slob_t *s)
70357 {
70358 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70359 }
70360@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70361 if (!page)
70362 return NULL;
70363
70364+ set_slob_page(page);
70365 return page_address(page);
70366 }
70367
70368@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70369 if (!b)
70370 return NULL;
70371 sp = slob_page(b);
70372- set_slob_page(sp);
70373
70374 spin_lock_irqsave(&slob_lock, flags);
70375 sp->units = SLOB_UNITS(PAGE_SIZE);
70376 sp->free = b;
70377+ sp->size = 0;
70378 INIT_LIST_HEAD(&sp->list);
70379 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70380 set_slob_page_free(sp, slob_list);
70381@@ -475,10 +478,9 @@ out:
70382 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70383 #endif
70384
70385-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70386+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70387 {
70388- unsigned int *m;
70389- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70390+ slob_t *m;
70391 void *ret;
70392
70393 lockdep_trace_alloc(gfp);
70394@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70395
70396 if (!m)
70397 return NULL;
70398- *m = size;
70399+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70400+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70401+ m[0].units = size;
70402+ m[1].units = align;
70403 ret = (void *)m + align;
70404
70405 trace_kmalloc_node(_RET_IP_, ret,
70406@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70407
70408 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70409 if (ret) {
70410- struct page *page;
70411- page = virt_to_page(ret);
70412- page->private = size;
70413+ struct slob_page *sp;
70414+ sp = slob_page(ret);
70415+ sp->size = size;
70416 }
70417
70418 trace_kmalloc_node(_RET_IP_, ret,
70419 size, PAGE_SIZE << order, gfp, node);
70420 }
70421
70422- kmemleak_alloc(ret, size, 1, gfp);
70423+ return ret;
70424+}
70425+
70426+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70427+{
70428+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70429+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70430+
70431+ if (!ZERO_OR_NULL_PTR(ret))
70432+ kmemleak_alloc(ret, size, 1, gfp);
70433 return ret;
70434 }
70435 EXPORT_SYMBOL(__kmalloc_node);
70436@@ -528,13 +542,88 @@ void kfree(const void *block)
70437 sp = slob_page(block);
70438 if (is_slob_page(sp)) {
70439 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70440- unsigned int *m = (unsigned int *)(block - align);
70441- slob_free(m, *m + align);
70442- } else
70443+ slob_t *m = (slob_t *)(block - align);
70444+ slob_free(m, m[0].units + align);
70445+ } else {
70446+ clear_slob_page(sp);
70447+ free_slob_page(sp);
70448+ sp->size = 0;
70449 put_page(&sp->page);
70450+ }
70451 }
70452 EXPORT_SYMBOL(kfree);
70453
70454+void check_object_size(const void *ptr, unsigned long n, bool to)
70455+{
70456+
70457+#ifdef CONFIG_PAX_USERCOPY
70458+ struct slob_page *sp;
70459+ const slob_t *free;
70460+ const void *base;
70461+ unsigned long flags;
70462+
70463+ if (!n)
70464+ return;
70465+
70466+ if (ZERO_OR_NULL_PTR(ptr))
70467+ goto report;
70468+
70469+ if (!virt_addr_valid(ptr))
70470+ return;
70471+
70472+ sp = slob_page(ptr);
70473+ if (!PageSlab((struct page*)sp)) {
70474+ if (object_is_on_stack(ptr, n) == -1)
70475+ goto report;
70476+ return;
70477+ }
70478+
70479+ if (sp->size) {
70480+ base = page_address(&sp->page);
70481+ if (base <= ptr && n <= sp->size - (ptr - base))
70482+ return;
70483+ goto report;
70484+ }
70485+
70486+ /* some tricky double walking to find the chunk */
70487+ spin_lock_irqsave(&slob_lock, flags);
70488+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70489+ free = sp->free;
70490+
70491+ while (!slob_last(free) && (void *)free <= ptr) {
70492+ base = free + slob_units(free);
70493+ free = slob_next(free);
70494+ }
70495+
70496+ while (base < (void *)free) {
70497+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70498+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70499+ int offset;
70500+
70501+ if (ptr < base + align)
70502+ break;
70503+
70504+ offset = ptr - base - align;
70505+ if (offset >= m) {
70506+ base += size;
70507+ continue;
70508+ }
70509+
70510+ if (n > m - offset)
70511+ break;
70512+
70513+ spin_unlock_irqrestore(&slob_lock, flags);
70514+ return;
70515+ }
70516+
70517+ spin_unlock_irqrestore(&slob_lock, flags);
70518+report:
70519+ pax_report_usercopy(ptr, n, to, NULL);
70520+#endif
70521+
70522+}
70523+EXPORT_SYMBOL(check_object_size);
70524+
70525 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70526 size_t ksize(const void *block)
70527 {
70528@@ -547,10 +636,10 @@ size_t ksize(const void *block)
70529 sp = slob_page(block);
70530 if (is_slob_page(sp)) {
70531 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70532- unsigned int *m = (unsigned int *)(block - align);
70533- return SLOB_UNITS(*m) * SLOB_UNIT;
70534+ slob_t *m = (slob_t *)(block - align);
70535+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70536 } else
70537- return sp->page.private;
70538+ return sp->size;
70539 }
70540 EXPORT_SYMBOL(ksize);
70541
70542@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70543 {
70544 struct kmem_cache *c;
70545
70546+#ifdef CONFIG_PAX_USERCOPY
70547+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70548+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70549+#else
70550 c = slob_alloc(sizeof(struct kmem_cache),
70551 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70552+#endif
70553
70554 if (c) {
70555 c->name = name;
70556@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70557 {
70558 void *b;
70559
70560+#ifdef CONFIG_PAX_USERCOPY
70561+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70562+#else
70563 if (c->size < PAGE_SIZE) {
70564 b = slob_alloc(c->size, flags, c->align, node);
70565 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70566 SLOB_UNITS(c->size) * SLOB_UNIT,
70567 flags, node);
70568 } else {
70569+ struct slob_page *sp;
70570+
70571 b = slob_new_pages(flags, get_order(c->size), node);
70572+ sp = slob_page(b);
70573+ sp->size = c->size;
70574 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70575 PAGE_SIZE << get_order(c->size),
70576 flags, node);
70577 }
70578+#endif
70579
70580 if (c->ctor)
70581 c->ctor(b);
70582@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70583
70584 static void __kmem_cache_free(void *b, int size)
70585 {
70586- if (size < PAGE_SIZE)
70587+ struct slob_page *sp = slob_page(b);
70588+
70589+ if (is_slob_page(sp))
70590 slob_free(b, size);
70591- else
70592+ else {
70593+ clear_slob_page(sp);
70594+ free_slob_page(sp);
70595+ sp->size = 0;
70596 slob_free_pages(b, get_order(size));
70597+ }
70598 }
70599
70600 static void kmem_rcu_free(struct rcu_head *head)
70601@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70602
70603 void kmem_cache_free(struct kmem_cache *c, void *b)
70604 {
70605+ int size = c->size;
70606+
70607+#ifdef CONFIG_PAX_USERCOPY
70608+ if (size + c->align < PAGE_SIZE) {
70609+ size += c->align;
70610+ b -= c->align;
70611+ }
70612+#endif
70613+
70614 kmemleak_free_recursive(b, c->flags);
70615 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70616 struct slob_rcu *slob_rcu;
70617- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70618+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70619 INIT_RCU_HEAD(&slob_rcu->head);
70620- slob_rcu->size = c->size;
70621+ slob_rcu->size = size;
70622 call_rcu(&slob_rcu->head, kmem_rcu_free);
70623 } else {
70624- __kmem_cache_free(b, c->size);
70625+ __kmem_cache_free(b, size);
70626 }
70627
70628+#ifdef CONFIG_PAX_USERCOPY
70629+ trace_kfree(_RET_IP_, b);
70630+#else
70631 trace_kmem_cache_free(_RET_IP_, b);
70632+#endif
70633+
70634 }
70635 EXPORT_SYMBOL(kmem_cache_free);
70636
70637diff -urNp linux-2.6.32.45/mm/slub.c linux-2.6.32.45/mm/slub.c
70638--- linux-2.6.32.45/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70639+++ linux-2.6.32.45/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70640@@ -410,7 +410,7 @@ static void print_track(const char *s, s
70641 if (!t->addr)
70642 return;
70643
70644- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70645+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70646 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70647 }
70648
70649@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70650
70651 page = virt_to_head_page(x);
70652
70653+ BUG_ON(!PageSlab(page));
70654+
70655 slab_free(s, page, x, _RET_IP_);
70656
70657 trace_kmem_cache_free(_RET_IP_, x);
70658@@ -1937,7 +1939,7 @@ static int slub_min_objects;
70659 * Merge control. If this is set then no merging of slab caches will occur.
70660 * (Could be removed. This was introduced to pacify the merge skeptics.)
70661 */
70662-static int slub_nomerge;
70663+static int slub_nomerge = 1;
70664
70665 /*
70666 * Calculate the order of allocation given an slab object size.
70667@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
70668 * list to avoid pounding the page allocator excessively.
70669 */
70670 set_min_partial(s, ilog2(s->size));
70671- s->refcount = 1;
70672+ atomic_set(&s->refcount, 1);
70673 #ifdef CONFIG_NUMA
70674 s->remote_node_defrag_ratio = 1000;
70675 #endif
70676@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
70677 void kmem_cache_destroy(struct kmem_cache *s)
70678 {
70679 down_write(&slub_lock);
70680- s->refcount--;
70681- if (!s->refcount) {
70682+ if (atomic_dec_and_test(&s->refcount)) {
70683 list_del(&s->list);
70684 up_write(&slub_lock);
70685 if (kmem_cache_close(s)) {
70686@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
70687 __setup("slub_nomerge", setup_slub_nomerge);
70688
70689 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
70690- const char *name, int size, gfp_t gfp_flags)
70691+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
70692 {
70693- unsigned int flags = 0;
70694-
70695 if (gfp_flags & SLUB_DMA)
70696- flags = SLAB_CACHE_DMA;
70697+ flags |= SLAB_CACHE_DMA;
70698
70699 /*
70700 * This function is called with IRQs disabled during early-boot on
70701@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
70702 EXPORT_SYMBOL(__kmalloc_node);
70703 #endif
70704
70705+void check_object_size(const void *ptr, unsigned long n, bool to)
70706+{
70707+
70708+#ifdef CONFIG_PAX_USERCOPY
70709+ struct page *page;
70710+ struct kmem_cache *s = NULL;
70711+ unsigned long offset;
70712+
70713+ if (!n)
70714+ return;
70715+
70716+ if (ZERO_OR_NULL_PTR(ptr))
70717+ goto report;
70718+
70719+ if (!virt_addr_valid(ptr))
70720+ return;
70721+
70722+ page = get_object_page(ptr);
70723+
70724+ if (!page) {
70725+ if (object_is_on_stack(ptr, n) == -1)
70726+ goto report;
70727+ return;
70728+ }
70729+
70730+ s = page->slab;
70731+ if (!(s->flags & SLAB_USERCOPY))
70732+ goto report;
70733+
70734+ offset = (ptr - page_address(page)) % s->size;
70735+ if (offset <= s->objsize && n <= s->objsize - offset)
70736+ return;
70737+
70738+report:
70739+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
70740+#endif
70741+
70742+}
70743+EXPORT_SYMBOL(check_object_size);
70744+
70745 size_t ksize(const void *object)
70746 {
70747 struct page *page;
70748@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
70749 * kmem_cache_open for slab_state == DOWN.
70750 */
70751 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
70752- sizeof(struct kmem_cache_node), GFP_NOWAIT);
70753- kmalloc_caches[0].refcount = -1;
70754+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
70755+ atomic_set(&kmalloc_caches[0].refcount, -1);
70756 caches++;
70757
70758 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
70759@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
70760 /* Caches that are not of the two-to-the-power-of size */
70761 if (KMALLOC_MIN_SIZE <= 32) {
70762 create_kmalloc_cache(&kmalloc_caches[1],
70763- "kmalloc-96", 96, GFP_NOWAIT);
70764+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
70765 caches++;
70766 }
70767 if (KMALLOC_MIN_SIZE <= 64) {
70768 create_kmalloc_cache(&kmalloc_caches[2],
70769- "kmalloc-192", 192, GFP_NOWAIT);
70770+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
70771 caches++;
70772 }
70773
70774 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
70775 create_kmalloc_cache(&kmalloc_caches[i],
70776- "kmalloc", 1 << i, GFP_NOWAIT);
70777+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
70778 caches++;
70779 }
70780
70781@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
70782 /*
70783 * We may have set a slab to be unmergeable during bootstrap.
70784 */
70785- if (s->refcount < 0)
70786+ if (atomic_read(&s->refcount) < 0)
70787 return 1;
70788
70789 return 0;
70790@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
70791 if (s) {
70792 int cpu;
70793
70794- s->refcount++;
70795+ atomic_inc(&s->refcount);
70796 /*
70797 * Adjust the object sizes so that we clear
70798 * the complete object on kzalloc.
70799@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
70800
70801 if (sysfs_slab_alias(s, name)) {
70802 down_write(&slub_lock);
70803- s->refcount--;
70804+ atomic_dec(&s->refcount);
70805 up_write(&slub_lock);
70806 goto err;
70807 }
70808@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
70809
70810 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
70811 {
70812- return sprintf(buf, "%d\n", s->refcount - 1);
70813+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
70814 }
70815 SLAB_ATTR_RO(aliases);
70816
70817@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
70818 kfree(s);
70819 }
70820
70821-static struct sysfs_ops slab_sysfs_ops = {
70822+static const struct sysfs_ops slab_sysfs_ops = {
70823 .show = slab_attr_show,
70824 .store = slab_attr_store,
70825 };
70826@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
70827 return 0;
70828 }
70829
70830-static struct kset_uevent_ops slab_uevent_ops = {
70831+static const struct kset_uevent_ops slab_uevent_ops = {
70832 .filter = uevent_filter,
70833 };
70834
70835@@ -4785,7 +4824,13 @@ static const struct file_operations proc
70836
70837 static int __init slab_proc_init(void)
70838 {
70839- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
70840+ mode_t gr_mode = S_IRUGO;
70841+
70842+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70843+ gr_mode = S_IRUSR;
70844+#endif
70845+
70846+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
70847 return 0;
70848 }
70849 module_init(slab_proc_init);
70850diff -urNp linux-2.6.32.45/mm/swap.c linux-2.6.32.45/mm/swap.c
70851--- linux-2.6.32.45/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
70852+++ linux-2.6.32.45/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
70853@@ -30,6 +30,7 @@
70854 #include <linux/notifier.h>
70855 #include <linux/backing-dev.h>
70856 #include <linux/memcontrol.h>
70857+#include <linux/hugetlb.h>
70858
70859 #include "internal.h"
70860
70861@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
70862 compound_page_dtor *dtor;
70863
70864 dtor = get_compound_page_dtor(page);
70865+ if (!PageHuge(page))
70866+ BUG_ON(dtor != free_compound_page);
70867 (*dtor)(page);
70868 }
70869 }
70870diff -urNp linux-2.6.32.45/mm/util.c linux-2.6.32.45/mm/util.c
70871--- linux-2.6.32.45/mm/util.c 2011-03-27 14:31:47.000000000 -0400
70872+++ linux-2.6.32.45/mm/util.c 2011-04-17 15:56:46.000000000 -0400
70873@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
70874 void arch_pick_mmap_layout(struct mm_struct *mm)
70875 {
70876 mm->mmap_base = TASK_UNMAPPED_BASE;
70877+
70878+#ifdef CONFIG_PAX_RANDMMAP
70879+ if (mm->pax_flags & MF_PAX_RANDMMAP)
70880+ mm->mmap_base += mm->delta_mmap;
70881+#endif
70882+
70883 mm->get_unmapped_area = arch_get_unmapped_area;
70884 mm->unmap_area = arch_unmap_area;
70885 }
70886diff -urNp linux-2.6.32.45/mm/vmalloc.c linux-2.6.32.45/mm/vmalloc.c
70887--- linux-2.6.32.45/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
70888+++ linux-2.6.32.45/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
70889@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
70890
70891 pte = pte_offset_kernel(pmd, addr);
70892 do {
70893- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70894- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70895+
70896+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70897+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
70898+ BUG_ON(!pte_exec(*pte));
70899+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
70900+ continue;
70901+ }
70902+#endif
70903+
70904+ {
70905+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
70906+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70907+ }
70908 } while (pte++, addr += PAGE_SIZE, addr != end);
70909 }
70910
70911@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
70912 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
70913 {
70914 pte_t *pte;
70915+ int ret = -ENOMEM;
70916
70917 /*
70918 * nr is a running index into the array which helps higher level
70919@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
70920 pte = pte_alloc_kernel(pmd, addr);
70921 if (!pte)
70922 return -ENOMEM;
70923+
70924+ pax_open_kernel();
70925 do {
70926 struct page *page = pages[*nr];
70927
70928- if (WARN_ON(!pte_none(*pte)))
70929- return -EBUSY;
70930- if (WARN_ON(!page))
70931- return -ENOMEM;
70932+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70933+ if (!(pgprot_val(prot) & _PAGE_NX))
70934+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
70935+ else
70936+#endif
70937+
70938+ if (WARN_ON(!pte_none(*pte))) {
70939+ ret = -EBUSY;
70940+ goto out;
70941+ }
70942+ if (WARN_ON(!page)) {
70943+ ret = -ENOMEM;
70944+ goto out;
70945+ }
70946 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
70947 (*nr)++;
70948 } while (pte++, addr += PAGE_SIZE, addr != end);
70949- return 0;
70950+ ret = 0;
70951+out:
70952+ pax_close_kernel();
70953+ return ret;
70954 }
70955
70956 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
70957@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
70958 * and fall back on vmalloc() if that fails. Others
70959 * just put it in the vmalloc space.
70960 */
70961-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
70962+#ifdef CONFIG_MODULES
70963+#ifdef MODULES_VADDR
70964 unsigned long addr = (unsigned long)x;
70965 if (addr >= MODULES_VADDR && addr < MODULES_END)
70966 return 1;
70967 #endif
70968+
70969+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
70970+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
70971+ return 1;
70972+#endif
70973+
70974+#endif
70975+
70976 return is_vmalloc_addr(x);
70977 }
70978
70979@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
70980
70981 if (!pgd_none(*pgd)) {
70982 pud_t *pud = pud_offset(pgd, addr);
70983+#ifdef CONFIG_X86
70984+ if (!pud_large(*pud))
70985+#endif
70986 if (!pud_none(*pud)) {
70987 pmd_t *pmd = pmd_offset(pud, addr);
70988+#ifdef CONFIG_X86
70989+ if (!pmd_large(*pmd))
70990+#endif
70991 if (!pmd_none(*pmd)) {
70992 pte_t *ptep, pte;
70993
70994@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
70995 struct rb_node *tmp;
70996
70997 while (*p) {
70998- struct vmap_area *tmp;
70999+ struct vmap_area *varea;
71000
71001 parent = *p;
71002- tmp = rb_entry(parent, struct vmap_area, rb_node);
71003- if (va->va_start < tmp->va_end)
71004+ varea = rb_entry(parent, struct vmap_area, rb_node);
71005+ if (va->va_start < varea->va_end)
71006 p = &(*p)->rb_left;
71007- else if (va->va_end > tmp->va_start)
71008+ else if (va->va_end > varea->va_start)
71009 p = &(*p)->rb_right;
71010 else
71011 BUG();
71012@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
71013 struct vm_struct *area;
71014
71015 BUG_ON(in_interrupt());
71016+
71017+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71018+ if (flags & VM_KERNEXEC) {
71019+ if (start != VMALLOC_START || end != VMALLOC_END)
71020+ return NULL;
71021+ start = (unsigned long)MODULES_EXEC_VADDR;
71022+ end = (unsigned long)MODULES_EXEC_END;
71023+ }
71024+#endif
71025+
71026 if (flags & VM_IOREMAP) {
71027 int bit = fls(size);
71028
71029@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
71030 if (count > totalram_pages)
71031 return NULL;
71032
71033+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71034+ if (!(pgprot_val(prot) & _PAGE_NX))
71035+ flags |= VM_KERNEXEC;
71036+#endif
71037+
71038 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71039 __builtin_return_address(0));
71040 if (!area)
71041@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
71042 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71043 return NULL;
71044
71045+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71046+ if (!(pgprot_val(prot) & _PAGE_NX))
71047+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
71048+ node, gfp_mask, caller);
71049+ else
71050+#endif
71051+
71052 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
71053 VMALLOC_END, node, gfp_mask, caller);
71054
71055@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
71056 return addr;
71057 }
71058
71059+#undef __vmalloc
71060 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71061 {
71062 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71063@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
71064 * For tight control over page level allocator and protection flags
71065 * use __vmalloc() instead.
71066 */
71067+#undef vmalloc
71068 void *vmalloc(unsigned long size)
71069 {
71070 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71071@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
71072 * The resulting memory area is zeroed so it can be mapped to userspace
71073 * without leaking data.
71074 */
71075+#undef vmalloc_user
71076 void *vmalloc_user(unsigned long size)
71077 {
71078 struct vm_struct *area;
71079@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
71080 * For tight control over page level allocator and protection flags
71081 * use __vmalloc() instead.
71082 */
71083+#undef vmalloc_node
71084 void *vmalloc_node(unsigned long size, int node)
71085 {
71086 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71087@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
71088 * For tight control over page level allocator and protection flags
71089 * use __vmalloc() instead.
71090 */
71091-
71092+#undef vmalloc_exec
71093 void *vmalloc_exec(unsigned long size)
71094 {
71095- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71096+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71097 -1, __builtin_return_address(0));
71098 }
71099
71100@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
71101 * Allocate enough 32bit PA addressable pages to cover @size from the
71102 * page level allocator and map them into contiguous kernel virtual space.
71103 */
71104+#undef vmalloc_32
71105 void *vmalloc_32(unsigned long size)
71106 {
71107 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71108@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
71109 * The resulting memory area is 32bit addressable and zeroed so it can be
71110 * mapped to userspace without leaking data.
71111 */
71112+#undef vmalloc_32_user
71113 void *vmalloc_32_user(unsigned long size)
71114 {
71115 struct vm_struct *area;
71116@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
71117 unsigned long uaddr = vma->vm_start;
71118 unsigned long usize = vma->vm_end - vma->vm_start;
71119
71120+ BUG_ON(vma->vm_mirror);
71121+
71122 if ((PAGE_SIZE-1) & (unsigned long)addr)
71123 return -EINVAL;
71124
71125diff -urNp linux-2.6.32.45/mm/vmstat.c linux-2.6.32.45/mm/vmstat.c
71126--- linux-2.6.32.45/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
71127+++ linux-2.6.32.45/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
71128@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
71129 *
71130 * vm_stat contains the global counters
71131 */
71132-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71133+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71134 EXPORT_SYMBOL(vm_stat);
71135
71136 #ifdef CONFIG_SMP
71137@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
71138 v = p->vm_stat_diff[i];
71139 p->vm_stat_diff[i] = 0;
71140 local_irq_restore(flags);
71141- atomic_long_add(v, &zone->vm_stat[i]);
71142+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71143 global_diff[i] += v;
71144 #ifdef CONFIG_NUMA
71145 /* 3 seconds idle till flush */
71146@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
71147
71148 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71149 if (global_diff[i])
71150- atomic_long_add(global_diff[i], &vm_stat[i]);
71151+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71152 }
71153
71154 #endif
71155@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
71156 start_cpu_timer(cpu);
71157 #endif
71158 #ifdef CONFIG_PROC_FS
71159- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71160- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71161- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71162- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71163+ {
71164+ mode_t gr_mode = S_IRUGO;
71165+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71166+ gr_mode = S_IRUSR;
71167+#endif
71168+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71169+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71170+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71171+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71172+#else
71173+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71174+#endif
71175+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71176+ }
71177 #endif
71178 return 0;
71179 }
71180diff -urNp linux-2.6.32.45/net/8021q/vlan.c linux-2.6.32.45/net/8021q/vlan.c
71181--- linux-2.6.32.45/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
71182+++ linux-2.6.32.45/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
71183@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
71184 err = -EPERM;
71185 if (!capable(CAP_NET_ADMIN))
71186 break;
71187- if ((args.u.name_type >= 0) &&
71188- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71189+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71190 struct vlan_net *vn;
71191
71192 vn = net_generic(net, vlan_net_id);
71193diff -urNp linux-2.6.32.45/net/atm/atm_misc.c linux-2.6.32.45/net/atm/atm_misc.c
71194--- linux-2.6.32.45/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
71195+++ linux-2.6.32.45/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
71196@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
71197 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71198 return 1;
71199 atm_return(vcc,truesize);
71200- atomic_inc(&vcc->stats->rx_drop);
71201+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71202 return 0;
71203 }
71204
71205@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
71206 }
71207 }
71208 atm_return(vcc,guess);
71209- atomic_inc(&vcc->stats->rx_drop);
71210+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71211 return NULL;
71212 }
71213
71214@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
71215
71216 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71217 {
71218-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71219+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71220 __SONET_ITEMS
71221 #undef __HANDLE_ITEM
71222 }
71223@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
71224
71225 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71226 {
71227-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
71228+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71229 __SONET_ITEMS
71230 #undef __HANDLE_ITEM
71231 }
71232diff -urNp linux-2.6.32.45/net/atm/lec.h linux-2.6.32.45/net/atm/lec.h
71233--- linux-2.6.32.45/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
71234+++ linux-2.6.32.45/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
71235@@ -48,7 +48,7 @@ struct lane2_ops {
71236 const u8 *tlvs, u32 sizeoftlvs);
71237 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71238 const u8 *tlvs, u32 sizeoftlvs);
71239-};
71240+} __no_const;
71241
71242 /*
71243 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71244diff -urNp linux-2.6.32.45/net/atm/mpc.c linux-2.6.32.45/net/atm/mpc.c
71245--- linux-2.6.32.45/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
71246+++ linux-2.6.32.45/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
71247@@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
71248 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
71249 else {
71250 mpc->old_ops = dev->netdev_ops;
71251- mpc->new_ops = *mpc->old_ops;
71252- mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71253+ memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
71254+ *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71255 dev->netdev_ops = &mpc->new_ops;
71256 }
71257 }
71258diff -urNp linux-2.6.32.45/net/atm/mpoa_caches.c linux-2.6.32.45/net/atm/mpoa_caches.c
71259--- linux-2.6.32.45/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
71260+++ linux-2.6.32.45/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
71261@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
71262 struct timeval now;
71263 struct k_message msg;
71264
71265+ pax_track_stack();
71266+
71267 do_gettimeofday(&now);
71268
71269 write_lock_irq(&client->egress_lock);
71270diff -urNp linux-2.6.32.45/net/atm/proc.c linux-2.6.32.45/net/atm/proc.c
71271--- linux-2.6.32.45/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
71272+++ linux-2.6.32.45/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
71273@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
71274 const struct k_atm_aal_stats *stats)
71275 {
71276 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71277- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71278- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71279- atomic_read(&stats->rx_drop));
71280+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71281+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71282+ atomic_read_unchecked(&stats->rx_drop));
71283 }
71284
71285 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71286@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71287 {
71288 struct sock *sk = sk_atm(vcc);
71289
71290+#ifdef CONFIG_GRKERNSEC_HIDESYM
71291+ seq_printf(seq, "%p ", NULL);
71292+#else
71293 seq_printf(seq, "%p ", vcc);
71294+#endif
71295+
71296 if (!vcc->dev)
71297 seq_printf(seq, "Unassigned ");
71298 else
71299@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71300 {
71301 if (!vcc->dev)
71302 seq_printf(seq, sizeof(void *) == 4 ?
71303+#ifdef CONFIG_GRKERNSEC_HIDESYM
71304+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71305+#else
71306 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71307+#endif
71308 else
71309 seq_printf(seq, "%3d %3d %5d ",
71310 vcc->dev->number, vcc->vpi, vcc->vci);
71311diff -urNp linux-2.6.32.45/net/atm/resources.c linux-2.6.32.45/net/atm/resources.c
71312--- linux-2.6.32.45/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71313+++ linux-2.6.32.45/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71314@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71315 static void copy_aal_stats(struct k_atm_aal_stats *from,
71316 struct atm_aal_stats *to)
71317 {
71318-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71319+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71320 __AAL_STAT_ITEMS
71321 #undef __HANDLE_ITEM
71322 }
71323@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71324 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71325 struct atm_aal_stats *to)
71326 {
71327-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71328+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71329 __AAL_STAT_ITEMS
71330 #undef __HANDLE_ITEM
71331 }
71332diff -urNp linux-2.6.32.45/net/bluetooth/l2cap.c linux-2.6.32.45/net/bluetooth/l2cap.c
71333--- linux-2.6.32.45/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71334+++ linux-2.6.32.45/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71335@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71336 err = -ENOTCONN;
71337 break;
71338 }
71339-
71340+ memset(&cinfo, 0, sizeof(cinfo));
71341 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71342 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71343
71344@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71345
71346 /* Reject if config buffer is too small. */
71347 len = cmd_len - sizeof(*req);
71348- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71349+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71350 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71351 l2cap_build_conf_rsp(sk, rsp,
71352 L2CAP_CONF_REJECT, flags), rsp);
71353diff -urNp linux-2.6.32.45/net/bluetooth/rfcomm/sock.c linux-2.6.32.45/net/bluetooth/rfcomm/sock.c
71354--- linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71355+++ linux-2.6.32.45/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71356@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71357
71358 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71359
71360+ memset(&cinfo, 0, sizeof(cinfo));
71361 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71362 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71363
71364diff -urNp linux-2.6.32.45/net/bridge/br_private.h linux-2.6.32.45/net/bridge/br_private.h
71365--- linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:35:30.000000000 -0400
71366+++ linux-2.6.32.45/net/bridge/br_private.h 2011-08-09 18:34:01.000000000 -0400
71367@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event,
71368
71369 #ifdef CONFIG_SYSFS
71370 /* br_sysfs_if.c */
71371-extern struct sysfs_ops brport_sysfs_ops;
71372+extern const struct sysfs_ops brport_sysfs_ops;
71373 extern int br_sysfs_addif(struct net_bridge_port *p);
71374
71375 /* br_sysfs_br.c */
71376diff -urNp linux-2.6.32.45/net/bridge/br_stp_if.c linux-2.6.32.45/net/bridge/br_stp_if.c
71377--- linux-2.6.32.45/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71378+++ linux-2.6.32.45/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71379@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71380 char *envp[] = { NULL };
71381
71382 if (br->stp_enabled == BR_USER_STP) {
71383- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71384+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71385 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71386 br->dev->name, r);
71387
71388diff -urNp linux-2.6.32.45/net/bridge/br_sysfs_if.c linux-2.6.32.45/net/bridge/br_sysfs_if.c
71389--- linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71390+++ linux-2.6.32.45/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71391@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71392 return ret;
71393 }
71394
71395-struct sysfs_ops brport_sysfs_ops = {
71396+const struct sysfs_ops brport_sysfs_ops = {
71397 .show = brport_show,
71398 .store = brport_store,
71399 };
71400diff -urNp linux-2.6.32.45/net/bridge/netfilter/ebtables.c linux-2.6.32.45/net/bridge/netfilter/ebtables.c
71401--- linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71402+++ linux-2.6.32.45/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71403@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71404 unsigned int entries_size, nentries;
71405 char *entries;
71406
71407+ pax_track_stack();
71408+
71409 if (cmd == EBT_SO_GET_ENTRIES) {
71410 entries_size = t->private->entries_size;
71411 nentries = t->private->nentries;
71412diff -urNp linux-2.6.32.45/net/can/bcm.c linux-2.6.32.45/net/can/bcm.c
71413--- linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71414+++ linux-2.6.32.45/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71415@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71416 struct bcm_sock *bo = bcm_sk(sk);
71417 struct bcm_op *op;
71418
71419+#ifdef CONFIG_GRKERNSEC_HIDESYM
71420+ seq_printf(m, ">>> socket %p", NULL);
71421+ seq_printf(m, " / sk %p", NULL);
71422+ seq_printf(m, " / bo %p", NULL);
71423+#else
71424 seq_printf(m, ">>> socket %p", sk->sk_socket);
71425 seq_printf(m, " / sk %p", sk);
71426 seq_printf(m, " / bo %p", bo);
71427+#endif
71428 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71429 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71430 seq_printf(m, " <<<\n");
71431diff -urNp linux-2.6.32.45/net/core/dev.c linux-2.6.32.45/net/core/dev.c
71432--- linux-2.6.32.45/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71433+++ linux-2.6.32.45/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71434@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71435 if (no_module && capable(CAP_NET_ADMIN))
71436 no_module = request_module("netdev-%s", name);
71437 if (no_module && capable(CAP_SYS_MODULE)) {
71438+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71439+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71440+#else
71441 if (!request_module("%s", name))
71442 pr_err("Loading kernel module for a network device "
71443 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71444 "instead\n", name);
71445+#endif
71446 }
71447 }
71448 EXPORT_SYMBOL(dev_load);
71449@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71450
71451 struct dev_gso_cb {
71452 void (*destructor)(struct sk_buff *skb);
71453-};
71454+} __no_const;
71455
71456 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71457
71458@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71459 }
71460 EXPORT_SYMBOL(netif_rx_ni);
71461
71462-static void net_tx_action(struct softirq_action *h)
71463+static void net_tx_action(void)
71464 {
71465 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71466
71467@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71468 EXPORT_SYMBOL(netif_napi_del);
71469
71470
71471-static void net_rx_action(struct softirq_action *h)
71472+static void net_rx_action(void)
71473 {
71474 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71475 unsigned long time_limit = jiffies + 2;
71476diff -urNp linux-2.6.32.45/net/core/flow.c linux-2.6.32.45/net/core/flow.c
71477--- linux-2.6.32.45/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71478+++ linux-2.6.32.45/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71479@@ -35,11 +35,11 @@ struct flow_cache_entry {
71480 atomic_t *object_ref;
71481 };
71482
71483-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71484+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71485
71486 static u32 flow_hash_shift;
71487 #define flow_hash_size (1 << flow_hash_shift)
71488-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71489+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71490
71491 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71492
71493@@ -52,7 +52,7 @@ struct flow_percpu_info {
71494 u32 hash_rnd;
71495 int count;
71496 };
71497-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71498+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71499
71500 #define flow_hash_rnd_recalc(cpu) \
71501 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71502@@ -69,7 +69,7 @@ struct flow_flush_info {
71503 atomic_t cpuleft;
71504 struct completion completion;
71505 };
71506-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71507+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71508
71509 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71510
71511@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71512 if (fle->family == family &&
71513 fle->dir == dir &&
71514 flow_key_compare(key, &fle->key) == 0) {
71515- if (fle->genid == atomic_read(&flow_cache_genid)) {
71516+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71517 void *ret = fle->object;
71518
71519 if (ret)
71520@@ -228,7 +228,7 @@ nocache:
71521 err = resolver(net, key, family, dir, &obj, &obj_ref);
71522
71523 if (fle && !err) {
71524- fle->genid = atomic_read(&flow_cache_genid);
71525+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71526
71527 if (fle->object)
71528 atomic_dec(fle->object_ref);
71529@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71530
71531 fle = flow_table(cpu)[i];
71532 for (; fle; fle = fle->next) {
71533- unsigned genid = atomic_read(&flow_cache_genid);
71534+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71535
71536 if (!fle->object || fle->genid == genid)
71537 continue;
71538diff -urNp linux-2.6.32.45/net/core/rtnetlink.c linux-2.6.32.45/net/core/rtnetlink.c
71539--- linux-2.6.32.45/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71540+++ linux-2.6.32.45/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71541@@ -57,7 +57,7 @@ struct rtnl_link
71542 {
71543 rtnl_doit_func doit;
71544 rtnl_dumpit_func dumpit;
71545-};
71546+} __no_const;
71547
71548 static DEFINE_MUTEX(rtnl_mutex);
71549
71550diff -urNp linux-2.6.32.45/net/core/secure_seq.c linux-2.6.32.45/net/core/secure_seq.c
71551--- linux-2.6.32.45/net/core/secure_seq.c 2011-08-16 20:37:25.000000000 -0400
71552+++ linux-2.6.32.45/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71553@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be3
71554 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71555
71556 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71557- __be16 dport)
71558+ __be16 dport)
71559 {
71560 u32 secret[MD5_MESSAGE_BYTES / 4];
71561 u32 hash[MD5_DIGEST_WORDS];
71562@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __b
71563 secret[i] = net_secret[i];
71564
71565 md5_transform(hash, secret);
71566-
71567 return hash[0];
71568 }
71569 #endif
71570diff -urNp linux-2.6.32.45/net/core/skbuff.c linux-2.6.32.45/net/core/skbuff.c
71571--- linux-2.6.32.45/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
71572+++ linux-2.6.32.45/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
71573@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
71574 struct sk_buff *frag_iter;
71575 struct sock *sk = skb->sk;
71576
71577+ pax_track_stack();
71578+
71579 /*
71580 * __skb_splice_bits() only fails if the output has no room left,
71581 * so no point in going over the frag_list for the error case.
71582diff -urNp linux-2.6.32.45/net/core/sock.c linux-2.6.32.45/net/core/sock.c
71583--- linux-2.6.32.45/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
71584+++ linux-2.6.32.45/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
71585@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
71586 break;
71587
71588 case SO_PEERCRED:
71589+ {
71590+ struct ucred peercred;
71591 if (len > sizeof(sk->sk_peercred))
71592 len = sizeof(sk->sk_peercred);
71593- if (copy_to_user(optval, &sk->sk_peercred, len))
71594+ peercred = sk->sk_peercred;
71595+ if (copy_to_user(optval, &peercred, len))
71596 return -EFAULT;
71597 goto lenout;
71598+ }
71599
71600 case SO_PEERNAME:
71601 {
71602@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
71603 */
71604 smp_wmb();
71605 atomic_set(&sk->sk_refcnt, 1);
71606- atomic_set(&sk->sk_drops, 0);
71607+ atomic_set_unchecked(&sk->sk_drops, 0);
71608 }
71609 EXPORT_SYMBOL(sock_init_data);
71610
71611diff -urNp linux-2.6.32.45/net/decnet/sysctl_net_decnet.c linux-2.6.32.45/net/decnet/sysctl_net_decnet.c
71612--- linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
71613+++ linux-2.6.32.45/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
71614@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
71615
71616 if (len > *lenp) len = *lenp;
71617
71618- if (copy_to_user(buffer, addr, len))
71619+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
71620 return -EFAULT;
71621
71622 *lenp = len;
71623@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
71624
71625 if (len > *lenp) len = *lenp;
71626
71627- if (copy_to_user(buffer, devname, len))
71628+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
71629 return -EFAULT;
71630
71631 *lenp = len;
71632diff -urNp linux-2.6.32.45/net/econet/Kconfig linux-2.6.32.45/net/econet/Kconfig
71633--- linux-2.6.32.45/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
71634+++ linux-2.6.32.45/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
71635@@ -4,7 +4,7 @@
71636
71637 config ECONET
71638 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
71639- depends on EXPERIMENTAL && INET
71640+ depends on EXPERIMENTAL && INET && BROKEN
71641 ---help---
71642 Econet is a fairly old and slow networking protocol mainly used by
71643 Acorn computers to access file and print servers. It uses native
71644diff -urNp linux-2.6.32.45/net/ieee802154/dgram.c linux-2.6.32.45/net/ieee802154/dgram.c
71645--- linux-2.6.32.45/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
71646+++ linux-2.6.32.45/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
71647@@ -318,7 +318,7 @@ out:
71648 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
71649 {
71650 if (sock_queue_rcv_skb(sk, skb) < 0) {
71651- atomic_inc(&sk->sk_drops);
71652+ atomic_inc_unchecked(&sk->sk_drops);
71653 kfree_skb(skb);
71654 return NET_RX_DROP;
71655 }
71656diff -urNp linux-2.6.32.45/net/ieee802154/raw.c linux-2.6.32.45/net/ieee802154/raw.c
71657--- linux-2.6.32.45/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
71658+++ linux-2.6.32.45/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
71659@@ -206,7 +206,7 @@ out:
71660 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
71661 {
71662 if (sock_queue_rcv_skb(sk, skb) < 0) {
71663- atomic_inc(&sk->sk_drops);
71664+ atomic_inc_unchecked(&sk->sk_drops);
71665 kfree_skb(skb);
71666 return NET_RX_DROP;
71667 }
71668diff -urNp linux-2.6.32.45/net/ipv4/inet_diag.c linux-2.6.32.45/net/ipv4/inet_diag.c
71669--- linux-2.6.32.45/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
71670+++ linux-2.6.32.45/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
71671@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
71672 r->idiag_retrans = 0;
71673
71674 r->id.idiag_if = sk->sk_bound_dev_if;
71675+#ifdef CONFIG_GRKERNSEC_HIDESYM
71676+ r->id.idiag_cookie[0] = 0;
71677+ r->id.idiag_cookie[1] = 0;
71678+#else
71679 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
71680 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
71681+#endif
71682
71683 r->id.idiag_sport = inet->sport;
71684 r->id.idiag_dport = inet->dport;
71685@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
71686 r->idiag_family = tw->tw_family;
71687 r->idiag_retrans = 0;
71688 r->id.idiag_if = tw->tw_bound_dev_if;
71689+
71690+#ifdef CONFIG_GRKERNSEC_HIDESYM
71691+ r->id.idiag_cookie[0] = 0;
71692+ r->id.idiag_cookie[1] = 0;
71693+#else
71694 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
71695 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
71696+#endif
71697+
71698 r->id.idiag_sport = tw->tw_sport;
71699 r->id.idiag_dport = tw->tw_dport;
71700 r->id.idiag_src[0] = tw->tw_rcv_saddr;
71701@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
71702 if (sk == NULL)
71703 goto unlock;
71704
71705+#ifndef CONFIG_GRKERNSEC_HIDESYM
71706 err = -ESTALE;
71707 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
71708 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
71709 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
71710 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
71711 goto out;
71712+#endif
71713
71714 err = -ENOMEM;
71715 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
71716@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
71717 r->idiag_retrans = req->retrans;
71718
71719 r->id.idiag_if = sk->sk_bound_dev_if;
71720+
71721+#ifdef CONFIG_GRKERNSEC_HIDESYM
71722+ r->id.idiag_cookie[0] = 0;
71723+ r->id.idiag_cookie[1] = 0;
71724+#else
71725 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
71726 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
71727+#endif
71728
71729 tmo = req->expires - jiffies;
71730 if (tmo < 0)
71731diff -urNp linux-2.6.32.45/net/ipv4/inet_hashtables.c linux-2.6.32.45/net/ipv4/inet_hashtables.c
71732--- linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:37:25.000000000 -0400
71733+++ linux-2.6.32.45/net/ipv4/inet_hashtables.c 2011-08-16 20:42:30.000000000 -0400
71734@@ -18,12 +18,15 @@
71735 #include <linux/sched.h>
71736 #include <linux/slab.h>
71737 #include <linux/wait.h>
71738+#include <linux/security.h>
71739
71740 #include <net/inet_connection_sock.h>
71741 #include <net/inet_hashtables.h>
71742 #include <net/secure_seq.h>
71743 #include <net/ip.h>
71744
71745+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
71746+
71747 /*
71748 * Allocate and initialize a new local port bind bucket.
71749 * The bindhash mutex for snum's hash chain must be held here.
71750@@ -491,6 +494,8 @@ ok:
71751 }
71752 spin_unlock(&head->lock);
71753
71754+ gr_update_task_in_ip_table(current, inet_sk(sk));
71755+
71756 if (tw) {
71757 inet_twsk_deschedule(tw, death_row);
71758 inet_twsk_put(tw);
71759diff -urNp linux-2.6.32.45/net/ipv4/inetpeer.c linux-2.6.32.45/net/ipv4/inetpeer.c
71760--- linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-16 20:37:25.000000000 -0400
71761+++ linux-2.6.32.45/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
71762@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
71763 struct inet_peer *p, *n;
71764 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
71765
71766+ pax_track_stack();
71767+
71768 /* Look up for the address quickly. */
71769 read_lock_bh(&peer_pool_lock);
71770 p = lookup(daddr, NULL);
71771@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
71772 return NULL;
71773 n->v4daddr = daddr;
71774 atomic_set(&n->refcnt, 1);
71775- atomic_set(&n->rid, 0);
71776+ atomic_set_unchecked(&n->rid, 0);
71777 n->ip_id_count = secure_ip_id(daddr);
71778 n->tcp_ts_stamp = 0;
71779
71780diff -urNp linux-2.6.32.45/net/ipv4/ip_fragment.c linux-2.6.32.45/net/ipv4/ip_fragment.c
71781--- linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
71782+++ linux-2.6.32.45/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
71783@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
71784 return 0;
71785
71786 start = qp->rid;
71787- end = atomic_inc_return(&peer->rid);
71788+ end = atomic_inc_return_unchecked(&peer->rid);
71789 qp->rid = end;
71790
71791 rc = qp->q.fragments && (end - start) > max;
71792diff -urNp linux-2.6.32.45/net/ipv4/ip_sockglue.c linux-2.6.32.45/net/ipv4/ip_sockglue.c
71793--- linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
71794+++ linux-2.6.32.45/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
71795@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
71796 int val;
71797 int len;
71798
71799+ pax_track_stack();
71800+
71801 if (level != SOL_IP)
71802 return -EOPNOTSUPP;
71803
71804diff -urNp linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c
71805--- linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
71806+++ linux-2.6.32.45/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
71807@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
71808 private = &tmp;
71809 }
71810 #endif
71811+ memset(&info, 0, sizeof(info));
71812 info.valid_hooks = t->valid_hooks;
71813 memcpy(info.hook_entry, private->hook_entry,
71814 sizeof(info.hook_entry));
71815diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c
71816--- linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-03-27 14:31:47.000000000 -0400
71817+++ linux-2.6.32.45/net/ipv4/netfilter/ip_queue.c 2011-08-21 18:42:53.000000000 -0400
71818@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, st
71819
71820 if (v->data_len < sizeof(*user_iph))
71821 return 0;
71822+ if (v->data_len > 65535)
71823+ return -EMSGSIZE;
71824+
71825 diff = v->data_len - e->skb->len;
71826 if (diff < 0) {
71827 if (pskb_trim(e->skb, v->data_len))
71828@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
71829 static inline void
71830 __ipq_rcv_skb(struct sk_buff *skb)
71831 {
71832- int status, type, pid, flags, nlmsglen, skblen;
71833+ int status, type, pid, flags;
71834+ unsigned int nlmsglen, skblen;
71835 struct nlmsghdr *nlh;
71836
71837 skblen = skb->len;
71838diff -urNp linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c
71839--- linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
71840+++ linux-2.6.32.45/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
71841@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
71842 private = &tmp;
71843 }
71844 #endif
71845+ memset(&info, 0, sizeof(info));
71846 info.valid_hooks = t->valid_hooks;
71847 memcpy(info.hook_entry, private->hook_entry,
71848 sizeof(info.hook_entry));
71849diff -urNp linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c
71850--- linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
71851+++ linux-2.6.32.45/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
71852@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
71853
71854 *len = 0;
71855
71856- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
71857+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
71858 if (*octets == NULL) {
71859 if (net_ratelimit())
71860 printk("OOM in bsalg (%d)\n", __LINE__);
71861diff -urNp linux-2.6.32.45/net/ipv4/raw.c linux-2.6.32.45/net/ipv4/raw.c
71862--- linux-2.6.32.45/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
71863+++ linux-2.6.32.45/net/ipv4/raw.c 2011-08-14 11:46:51.000000000 -0400
71864@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
71865 /* Charge it to the socket. */
71866
71867 if (sock_queue_rcv_skb(sk, skb) < 0) {
71868- atomic_inc(&sk->sk_drops);
71869+ atomic_inc_unchecked(&sk->sk_drops);
71870 kfree_skb(skb);
71871 return NET_RX_DROP;
71872 }
71873@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
71874 int raw_rcv(struct sock *sk, struct sk_buff *skb)
71875 {
71876 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
71877- atomic_inc(&sk->sk_drops);
71878+ atomic_inc_unchecked(&sk->sk_drops);
71879 kfree_skb(skb);
71880 return NET_RX_DROP;
71881 }
71882@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
71883
71884 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
71885 {
71886+ struct icmp_filter filter;
71887+
71888+ if (optlen < 0)
71889+ return -EINVAL;
71890 if (optlen > sizeof(struct icmp_filter))
71891 optlen = sizeof(struct icmp_filter);
71892- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
71893+ if (copy_from_user(&filter, optval, optlen))
71894 return -EFAULT;
71895+ raw_sk(sk)->filter = filter;
71896+
71897 return 0;
71898 }
71899
71900 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
71901 {
71902 int len, ret = -EFAULT;
71903+ struct icmp_filter filter;
71904
71905 if (get_user(len, optlen))
71906 goto out;
71907@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
71908 if (len > sizeof(struct icmp_filter))
71909 len = sizeof(struct icmp_filter);
71910 ret = -EFAULT;
71911- if (put_user(len, optlen) ||
71912- copy_to_user(optval, &raw_sk(sk)->filter, len))
71913+ filter = raw_sk(sk)->filter;
71914+ if (put_user(len, optlen) || len > sizeof filter ||
71915+ copy_to_user(optval, &filter, len))
71916 goto out;
71917 ret = 0;
71918 out: return ret;
71919@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
71920 sk_wmem_alloc_get(sp),
71921 sk_rmem_alloc_get(sp),
71922 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
71923- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
71924+ atomic_read(&sp->sk_refcnt),
71925+#ifdef CONFIG_GRKERNSEC_HIDESYM
71926+ NULL,
71927+#else
71928+ sp,
71929+#endif
71930+ atomic_read_unchecked(&sp->sk_drops));
71931 }
71932
71933 static int raw_seq_show(struct seq_file *seq, void *v)
71934diff -urNp linux-2.6.32.45/net/ipv4/route.c linux-2.6.32.45/net/ipv4/route.c
71935--- linux-2.6.32.45/net/ipv4/route.c 2011-08-16 20:37:25.000000000 -0400
71936+++ linux-2.6.32.45/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
71937@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3
71938
71939 static inline int rt_genid(struct net *net)
71940 {
71941- return atomic_read(&net->ipv4.rt_genid);
71942+ return atomic_read_unchecked(&net->ipv4.rt_genid);
71943 }
71944
71945 #ifdef CONFIG_PROC_FS
71946@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n
71947 unsigned char shuffle;
71948
71949 get_random_bytes(&shuffle, sizeof(shuffle));
71950- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
71951+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
71952 }
71953
71954 /*
71955@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper
71956
71957 static __net_init int rt_secret_timer_init(struct net *net)
71958 {
71959- atomic_set(&net->ipv4.rt_genid,
71960+ atomic_set_unchecked(&net->ipv4.rt_genid,
71961 (int) ((num_physpages ^ (num_physpages>>8)) ^
71962 (jiffies ^ (jiffies >> 7))));
71963
71964diff -urNp linux-2.6.32.45/net/ipv4/tcp.c linux-2.6.32.45/net/ipv4/tcp.c
71965--- linux-2.6.32.45/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
71966+++ linux-2.6.32.45/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
71967@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
71968 int val;
71969 int err = 0;
71970
71971+ pax_track_stack();
71972+
71973 /* This is a string value all the others are int's */
71974 if (optname == TCP_CONGESTION) {
71975 char name[TCP_CA_NAME_MAX];
71976@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
71977 struct tcp_sock *tp = tcp_sk(sk);
71978 int val, len;
71979
71980+ pax_track_stack();
71981+
71982 if (get_user(len, optlen))
71983 return -EFAULT;
71984
71985diff -urNp linux-2.6.32.45/net/ipv4/tcp_ipv4.c linux-2.6.32.45/net/ipv4/tcp_ipv4.c
71986--- linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-16 20:37:25.000000000 -0400
71987+++ linux-2.6.32.45/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
71988@@ -85,6 +85,9 @@
71989 int sysctl_tcp_tw_reuse __read_mostly;
71990 int sysctl_tcp_low_latency __read_mostly;
71991
71992+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71993+extern int grsec_enable_blackhole;
71994+#endif
71995
71996 #ifdef CONFIG_TCP_MD5SIG
71997 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
71998@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
71999 return 0;
72000
72001 reset:
72002+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72003+ if (!grsec_enable_blackhole)
72004+#endif
72005 tcp_v4_send_reset(rsk, skb);
72006 discard:
72007 kfree_skb(skb);
72008@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
72009 TCP_SKB_CB(skb)->sacked = 0;
72010
72011 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72012- if (!sk)
72013+ if (!sk) {
72014+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72015+ ret = 1;
72016+#endif
72017 goto no_tcp_socket;
72018+ }
72019
72020 process:
72021- if (sk->sk_state == TCP_TIME_WAIT)
72022+ if (sk->sk_state == TCP_TIME_WAIT) {
72023+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72024+ ret = 2;
72025+#endif
72026 goto do_time_wait;
72027+ }
72028
72029 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
72030 goto discard_and_relse;
72031@@ -1651,6 +1665,10 @@ no_tcp_socket:
72032 bad_packet:
72033 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72034 } else {
72035+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72036+ if (!grsec_enable_blackhole || (ret == 1 &&
72037+ (skb->dev->flags & IFF_LOOPBACK)))
72038+#endif
72039 tcp_v4_send_reset(NULL, skb);
72040 }
72041
72042@@ -2195,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
72043 int rc = 0;
72044 struct proc_dir_entry *p;
72045
72046- afinfo->seq_fops.open = tcp_seq_open;
72047- afinfo->seq_fops.read = seq_read;
72048- afinfo->seq_fops.llseek = seq_lseek;
72049- afinfo->seq_fops.release = seq_release_net;
72050-
72051- afinfo->seq_ops.start = tcp_seq_start;
72052- afinfo->seq_ops.next = tcp_seq_next;
72053- afinfo->seq_ops.stop = tcp_seq_stop;
72054+ *(void **)&afinfo->seq_fops.open = tcp_seq_open;
72055+ *(void **)&afinfo->seq_fops.read = seq_read;
72056+ *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72057+ *(void **)&afinfo->seq_fops.release = seq_release_net;
72058+
72059+ *(void **)&afinfo->seq_ops.start = tcp_seq_start;
72060+ *(void **)&afinfo->seq_ops.next = tcp_seq_next;
72061+ *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
72062
72063 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72064 &afinfo->seq_fops, afinfo);
72065@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk
72066 0, /* non standard timer */
72067 0, /* open_requests have no inode */
72068 atomic_read(&sk->sk_refcnt),
72069+#ifdef CONFIG_GRKERNSEC_HIDESYM
72070+ NULL,
72071+#else
72072 req,
72073+#endif
72074 len);
72075 }
72076
72077@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
72078 sock_i_uid(sk),
72079 icsk->icsk_probes_out,
72080 sock_i_ino(sk),
72081- atomic_read(&sk->sk_refcnt), sk,
72082+ atomic_read(&sk->sk_refcnt),
72083+#ifdef CONFIG_GRKERNSEC_HIDESYM
72084+ NULL,
72085+#else
72086+ sk,
72087+#endif
72088 jiffies_to_clock_t(icsk->icsk_rto),
72089 jiffies_to_clock_t(icsk->icsk_ack.ato),
72090 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72091@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct in
72092 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
72093 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72094 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72095- atomic_read(&tw->tw_refcnt), tw, len);
72096+ atomic_read(&tw->tw_refcnt),
72097+#ifdef CONFIG_GRKERNSEC_HIDESYM
72098+ NULL,
72099+#else
72100+ tw,
72101+#endif
72102+ len);
72103 }
72104
72105 #define TMPSZ 150
72106diff -urNp linux-2.6.32.45/net/ipv4/tcp_minisocks.c linux-2.6.32.45/net/ipv4/tcp_minisocks.c
72107--- linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
72108+++ linux-2.6.32.45/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
72109@@ -26,6 +26,10 @@
72110 #include <net/inet_common.h>
72111 #include <net/xfrm.h>
72112
72113+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72114+extern int grsec_enable_blackhole;
72115+#endif
72116+
72117 #ifdef CONFIG_SYSCTL
72118 #define SYNC_INIT 0 /* let the user enable it */
72119 #else
72120@@ -672,6 +676,10 @@ listen_overflow:
72121
72122 embryonic_reset:
72123 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72124+
72125+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72126+ if (!grsec_enable_blackhole)
72127+#endif
72128 if (!(flg & TCP_FLAG_RST))
72129 req->rsk_ops->send_reset(sk, skb);
72130
72131diff -urNp linux-2.6.32.45/net/ipv4/tcp_output.c linux-2.6.32.45/net/ipv4/tcp_output.c
72132--- linux-2.6.32.45/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
72133+++ linux-2.6.32.45/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
72134@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
72135 __u8 *md5_hash_location;
72136 int mss;
72137
72138+ pax_track_stack();
72139+
72140 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
72141 if (skb == NULL)
72142 return NULL;
72143diff -urNp linux-2.6.32.45/net/ipv4/tcp_probe.c linux-2.6.32.45/net/ipv4/tcp_probe.c
72144--- linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
72145+++ linux-2.6.32.45/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
72146@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
72147 if (cnt + width >= len)
72148 break;
72149
72150- if (copy_to_user(buf + cnt, tbuf, width))
72151+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72152 return -EFAULT;
72153 cnt += width;
72154 }
72155diff -urNp linux-2.6.32.45/net/ipv4/tcp_timer.c linux-2.6.32.45/net/ipv4/tcp_timer.c
72156--- linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
72157+++ linux-2.6.32.45/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
72158@@ -21,6 +21,10 @@
72159 #include <linux/module.h>
72160 #include <net/tcp.h>
72161
72162+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72163+extern int grsec_lastack_retries;
72164+#endif
72165+
72166 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72167 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72168 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72169@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
72170 }
72171 }
72172
72173+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72174+ if ((sk->sk_state == TCP_LAST_ACK) &&
72175+ (grsec_lastack_retries > 0) &&
72176+ (grsec_lastack_retries < retry_until))
72177+ retry_until = grsec_lastack_retries;
72178+#endif
72179+
72180 if (retransmits_timed_out(sk, retry_until)) {
72181 /* Has it gone just too far? */
72182 tcp_write_err(sk);
72183diff -urNp linux-2.6.32.45/net/ipv4/udp.c linux-2.6.32.45/net/ipv4/udp.c
72184--- linux-2.6.32.45/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
72185+++ linux-2.6.32.45/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
72186@@ -86,6 +86,7 @@
72187 #include <linux/types.h>
72188 #include <linux/fcntl.h>
72189 #include <linux/module.h>
72190+#include <linux/security.h>
72191 #include <linux/socket.h>
72192 #include <linux/sockios.h>
72193 #include <linux/igmp.h>
72194@@ -106,6 +107,10 @@
72195 #include <net/xfrm.h>
72196 #include "udp_impl.h"
72197
72198+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72199+extern int grsec_enable_blackhole;
72200+#endif
72201+
72202 struct udp_table udp_table;
72203 EXPORT_SYMBOL(udp_table);
72204
72205@@ -371,6 +376,9 @@ found:
72206 return s;
72207 }
72208
72209+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72210+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72211+
72212 /*
72213 * This routine is called by the ICMP module when it gets some
72214 * sort of error condition. If err < 0 then the socket should
72215@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72216 dport = usin->sin_port;
72217 if (dport == 0)
72218 return -EINVAL;
72219+
72220+ err = gr_search_udp_sendmsg(sk, usin);
72221+ if (err)
72222+ return err;
72223 } else {
72224 if (sk->sk_state != TCP_ESTABLISHED)
72225 return -EDESTADDRREQ;
72226+
72227+ err = gr_search_udp_sendmsg(sk, NULL);
72228+ if (err)
72229+ return err;
72230+
72231 daddr = inet->daddr;
72232 dport = inet->dport;
72233 /* Open fast path for connected socket.
72234@@ -945,6 +962,10 @@ try_again:
72235 if (!skb)
72236 goto out;
72237
72238+ err = gr_search_udp_recvmsg(sk, skb);
72239+ if (err)
72240+ goto out_free;
72241+
72242 ulen = skb->len - sizeof(struct udphdr);
72243 copied = len;
72244 if (copied > ulen)
72245@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
72246 if (rc == -ENOMEM) {
72247 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72248 is_udplite);
72249- atomic_inc(&sk->sk_drops);
72250+ atomic_inc_unchecked(&sk->sk_drops);
72251 }
72252 goto drop;
72253 }
72254@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72255 goto csum_error;
72256
72257 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72258+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72259+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72260+#endif
72261 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72262
72263 /*
72264@@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
72265 struct proc_dir_entry *p;
72266 int rc = 0;
72267
72268- afinfo->seq_fops.open = udp_seq_open;
72269- afinfo->seq_fops.read = seq_read;
72270- afinfo->seq_fops.llseek = seq_lseek;
72271- afinfo->seq_fops.release = seq_release_net;
72272-
72273- afinfo->seq_ops.start = udp_seq_start;
72274- afinfo->seq_ops.next = udp_seq_next;
72275- afinfo->seq_ops.stop = udp_seq_stop;
72276+ *(void **)&afinfo->seq_fops.open = udp_seq_open;
72277+ *(void **)&afinfo->seq_fops.read = seq_read;
72278+ *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72279+ *(void **)&afinfo->seq_fops.release = seq_release_net;
72280+
72281+ *(void **)&afinfo->seq_ops.start = udp_seq_start;
72282+ *(void **)&afinfo->seq_ops.next = udp_seq_next;
72283+ *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
72284
72285 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72286 &afinfo->seq_fops, afinfo);
72287@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
72288 sk_wmem_alloc_get(sp),
72289 sk_rmem_alloc_get(sp),
72290 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72291- atomic_read(&sp->sk_refcnt), sp,
72292- atomic_read(&sp->sk_drops), len);
72293+ atomic_read(&sp->sk_refcnt),
72294+#ifdef CONFIG_GRKERNSEC_HIDESYM
72295+ NULL,
72296+#else
72297+ sp,
72298+#endif
72299+ atomic_read_unchecked(&sp->sk_drops), len);
72300 }
72301
72302 int udp4_seq_show(struct seq_file *seq, void *v)
72303diff -urNp linux-2.6.32.45/net/ipv6/inet6_connection_sock.c linux-2.6.32.45/net/ipv6/inet6_connection_sock.c
72304--- linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72305+++ linux-2.6.32.45/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72306@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72307 #ifdef CONFIG_XFRM
72308 {
72309 struct rt6_info *rt = (struct rt6_info *)dst;
72310- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72311+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72312 }
72313 #endif
72314 }
72315@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72316 #ifdef CONFIG_XFRM
72317 if (dst) {
72318 struct rt6_info *rt = (struct rt6_info *)dst;
72319- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72320+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72321 sk->sk_dst_cache = NULL;
72322 dst_release(dst);
72323 dst = NULL;
72324diff -urNp linux-2.6.32.45/net/ipv6/inet6_hashtables.c linux-2.6.32.45/net/ipv6/inet6_hashtables.c
72325--- linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-16 20:37:25.000000000 -0400
72326+++ linux-2.6.32.45/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72327@@ -119,7 +119,7 @@ out:
72328 }
72329 EXPORT_SYMBOL(__inet6_lookup_established);
72330
72331-static int inline compute_score(struct sock *sk, struct net *net,
72332+static inline int compute_score(struct sock *sk, struct net *net,
72333 const unsigned short hnum,
72334 const struct in6_addr *daddr,
72335 const int dif)
72336diff -urNp linux-2.6.32.45/net/ipv6/ipv6_sockglue.c linux-2.6.32.45/net/ipv6/ipv6_sockglue.c
72337--- linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72338+++ linux-2.6.32.45/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72339@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72340 int val, valbool;
72341 int retv = -ENOPROTOOPT;
72342
72343+ pax_track_stack();
72344+
72345 if (optval == NULL)
72346 val=0;
72347 else {
72348@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72349 int len;
72350 int val;
72351
72352+ pax_track_stack();
72353+
72354 if (ip6_mroute_opt(optname))
72355 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72356
72357diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c
72358--- linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-03-27 14:31:47.000000000 -0400
72359+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_queue.c 2011-08-21 18:43:32.000000000 -0400
72360@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, st
72361
72362 if (v->data_len < sizeof(*user_iph))
72363 return 0;
72364+ if (v->data_len > 65535)
72365+ return -EMSGSIZE;
72366+
72367 diff = v->data_len - e->skb->len;
72368 if (diff < 0) {
72369 if (pskb_trim(e->skb, v->data_len))
72370@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
72371 static inline void
72372 __ipq_rcv_skb(struct sk_buff *skb)
72373 {
72374- int status, type, pid, flags, nlmsglen, skblen;
72375+ int status, type, pid, flags;
72376+ unsigned int nlmsglen, skblen;
72377 struct nlmsghdr *nlh;
72378
72379 skblen = skb->len;
72380diff -urNp linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c
72381--- linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72382+++ linux-2.6.32.45/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72383@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72384 private = &tmp;
72385 }
72386 #endif
72387+ memset(&info, 0, sizeof(info));
72388 info.valid_hooks = t->valid_hooks;
72389 memcpy(info.hook_entry, private->hook_entry,
72390 sizeof(info.hook_entry));
72391diff -urNp linux-2.6.32.45/net/ipv6/raw.c linux-2.6.32.45/net/ipv6/raw.c
72392--- linux-2.6.32.45/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72393+++ linux-2.6.32.45/net/ipv6/raw.c 2011-08-14 11:48:20.000000000 -0400
72394@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72395 {
72396 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72397 skb_checksum_complete(skb)) {
72398- atomic_inc(&sk->sk_drops);
72399+ atomic_inc_unchecked(&sk->sk_drops);
72400 kfree_skb(skb);
72401 return NET_RX_DROP;
72402 }
72403
72404 /* Charge it to the socket. */
72405 if (sock_queue_rcv_skb(sk,skb)<0) {
72406- atomic_inc(&sk->sk_drops);
72407+ atomic_inc_unchecked(&sk->sk_drops);
72408 kfree_skb(skb);
72409 return NET_RX_DROP;
72410 }
72411@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72412 struct raw6_sock *rp = raw6_sk(sk);
72413
72414 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72415- atomic_inc(&sk->sk_drops);
72416+ atomic_inc_unchecked(&sk->sk_drops);
72417 kfree_skb(skb);
72418 return NET_RX_DROP;
72419 }
72420@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72421
72422 if (inet->hdrincl) {
72423 if (skb_checksum_complete(skb)) {
72424- atomic_inc(&sk->sk_drops);
72425+ atomic_inc_unchecked(&sk->sk_drops);
72426 kfree_skb(skb);
72427 return NET_RX_DROP;
72428 }
72429@@ -518,7 +518,7 @@ csum_copy_err:
72430 as some normal condition.
72431 */
72432 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72433- atomic_inc(&sk->sk_drops);
72434+ atomic_inc_unchecked(&sk->sk_drops);
72435 goto out;
72436 }
72437
72438@@ -600,7 +600,7 @@ out:
72439 return err;
72440 }
72441
72442-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72443+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72444 struct flowi *fl, struct rt6_info *rt,
72445 unsigned int flags)
72446 {
72447@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
72448 u16 proto;
72449 int err;
72450
72451+ pax_track_stack();
72452+
72453 /* Rough check on arithmetic overflow,
72454 better check is made in ip6_append_data().
72455 */
72456@@ -916,12 +918,17 @@ do_confirm:
72457 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
72458 char __user *optval, int optlen)
72459 {
72460+ struct icmp6_filter filter;
72461+
72462 switch (optname) {
72463 case ICMPV6_FILTER:
72464+ if (optlen < 0)
72465+ return -EINVAL;
72466 if (optlen > sizeof(struct icmp6_filter))
72467 optlen = sizeof(struct icmp6_filter);
72468- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
72469+ if (copy_from_user(&filter, optval, optlen))
72470 return -EFAULT;
72471+ raw6_sk(sk)->filter = filter;
72472 return 0;
72473 default:
72474 return -ENOPROTOOPT;
72475@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct so
72476 char __user *optval, int __user *optlen)
72477 {
72478 int len;
72479+ struct icmp6_filter filter;
72480
72481 switch (optname) {
72482 case ICMPV6_FILTER:
72483@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
72484 len = sizeof(struct icmp6_filter);
72485 if (put_user(len, optlen))
72486 return -EFAULT;
72487- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
72488+ filter = raw6_sk(sk)->filter;
72489+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
72490 return -EFAULT;
72491 return 0;
72492 default:
72493@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
72494 0, 0L, 0,
72495 sock_i_uid(sp), 0,
72496 sock_i_ino(sp),
72497- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72498+ atomic_read(&sp->sk_refcnt),
72499+#ifdef CONFIG_GRKERNSEC_HIDESYM
72500+ NULL,
72501+#else
72502+ sp,
72503+#endif
72504+ atomic_read_unchecked(&sp->sk_drops));
72505 }
72506
72507 static int raw6_seq_show(struct seq_file *seq, void *v)
72508diff -urNp linux-2.6.32.45/net/ipv6/tcp_ipv6.c linux-2.6.32.45/net/ipv6/tcp_ipv6.c
72509--- linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-16 20:37:25.000000000 -0400
72510+++ linux-2.6.32.45/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
72511@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
72512 }
72513 #endif
72514
72515+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72516+extern int grsec_enable_blackhole;
72517+#endif
72518+
72519 static void tcp_v6_hash(struct sock *sk)
72520 {
72521 if (sk->sk_state != TCP_CLOSE) {
72522@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
72523 return 0;
72524
72525 reset:
72526+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72527+ if (!grsec_enable_blackhole)
72528+#endif
72529 tcp_v6_send_reset(sk, skb);
72530 discard:
72531 if (opt_skb)
72532@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
72533 TCP_SKB_CB(skb)->sacked = 0;
72534
72535 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72536- if (!sk)
72537+ if (!sk) {
72538+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72539+ ret = 1;
72540+#endif
72541 goto no_tcp_socket;
72542+ }
72543
72544 process:
72545- if (sk->sk_state == TCP_TIME_WAIT)
72546+ if (sk->sk_state == TCP_TIME_WAIT) {
72547+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72548+ ret = 2;
72549+#endif
72550 goto do_time_wait;
72551+ }
72552
72553 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
72554 goto discard_and_relse;
72555@@ -1701,6 +1716,10 @@ no_tcp_socket:
72556 bad_packet:
72557 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72558 } else {
72559+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72560+ if (!grsec_enable_blackhole || (ret == 1 &&
72561+ (skb->dev->flags & IFF_LOOPBACK)))
72562+#endif
72563 tcp_v6_send_reset(NULL, skb);
72564 }
72565
72566@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file
72567 uid,
72568 0, /* non standard timer */
72569 0, /* open_requests have no inode */
72570- 0, req);
72571+ 0,
72572+#ifdef CONFIG_GRKERNSEC_HIDESYM
72573+ NULL
72574+#else
72575+ req
72576+#endif
72577+ );
72578 }
72579
72580 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
72581@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
72582 sock_i_uid(sp),
72583 icsk->icsk_probes_out,
72584 sock_i_ino(sp),
72585- atomic_read(&sp->sk_refcnt), sp,
72586+ atomic_read(&sp->sk_refcnt),
72587+#ifdef CONFIG_GRKERNSEC_HIDESYM
72588+ NULL,
72589+#else
72590+ sp,
72591+#endif
72592 jiffies_to_clock_t(icsk->icsk_rto),
72593 jiffies_to_clock_t(icsk->icsk_ack.ato),
72594 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
72595@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct se
72596 dest->s6_addr32[2], dest->s6_addr32[3], destp,
72597 tw->tw_substate, 0, 0,
72598 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72599- atomic_read(&tw->tw_refcnt), tw);
72600+ atomic_read(&tw->tw_refcnt),
72601+#ifdef CONFIG_GRKERNSEC_HIDESYM
72602+ NULL
72603+#else
72604+ tw
72605+#endif
72606+ );
72607 }
72608
72609 static int tcp6_seq_show(struct seq_file *seq, void *v)
72610diff -urNp linux-2.6.32.45/net/ipv6/udp.c linux-2.6.32.45/net/ipv6/udp.c
72611--- linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
72612+++ linux-2.6.32.45/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
72613@@ -49,6 +49,10 @@
72614 #include <linux/seq_file.h>
72615 #include "udp_impl.h"
72616
72617+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72618+extern int grsec_enable_blackhole;
72619+#endif
72620+
72621 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
72622 {
72623 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
72624@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
72625 if (rc == -ENOMEM) {
72626 UDP6_INC_STATS_BH(sock_net(sk),
72627 UDP_MIB_RCVBUFERRORS, is_udplite);
72628- atomic_inc(&sk->sk_drops);
72629+ atomic_inc_unchecked(&sk->sk_drops);
72630 }
72631 goto drop;
72632 }
72633@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
72634 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
72635 proto == IPPROTO_UDPLITE);
72636
72637+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72638+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72639+#endif
72640 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
72641
72642 kfree_skb(skb);
72643@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
72644 0, 0L, 0,
72645 sock_i_uid(sp), 0,
72646 sock_i_ino(sp),
72647- atomic_read(&sp->sk_refcnt), sp,
72648- atomic_read(&sp->sk_drops));
72649+ atomic_read(&sp->sk_refcnt),
72650+#ifdef CONFIG_GRKERNSEC_HIDESYM
72651+ NULL,
72652+#else
72653+ sp,
72654+#endif
72655+ atomic_read_unchecked(&sp->sk_drops));
72656 }
72657
72658 int udp6_seq_show(struct seq_file *seq, void *v)
72659diff -urNp linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c
72660--- linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
72661+++ linux-2.6.32.45/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
72662@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
72663 add_wait_queue(&self->open_wait, &wait);
72664
72665 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
72666- __FILE__,__LINE__, tty->driver->name, self->open_count );
72667+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72668
72669 /* As far as I can see, we protect open_count - Jean II */
72670 spin_lock_irqsave(&self->spinlock, flags);
72671 if (!tty_hung_up_p(filp)) {
72672 extra_count = 1;
72673- self->open_count--;
72674+ local_dec(&self->open_count);
72675 }
72676 spin_unlock_irqrestore(&self->spinlock, flags);
72677- self->blocked_open++;
72678+ local_inc(&self->blocked_open);
72679
72680 while (1) {
72681 if (tty->termios->c_cflag & CBAUD) {
72682@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
72683 }
72684
72685 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
72686- __FILE__,__LINE__, tty->driver->name, self->open_count );
72687+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
72688
72689 schedule();
72690 }
72691@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
72692 if (extra_count) {
72693 /* ++ is not atomic, so this should be protected - Jean II */
72694 spin_lock_irqsave(&self->spinlock, flags);
72695- self->open_count++;
72696+ local_inc(&self->open_count);
72697 spin_unlock_irqrestore(&self->spinlock, flags);
72698 }
72699- self->blocked_open--;
72700+ local_dec(&self->blocked_open);
72701
72702 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
72703- __FILE__,__LINE__, tty->driver->name, self->open_count);
72704+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
72705
72706 if (!retval)
72707 self->flags |= ASYNC_NORMAL_ACTIVE;
72708@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
72709 }
72710 /* ++ is not atomic, so this should be protected - Jean II */
72711 spin_lock_irqsave(&self->spinlock, flags);
72712- self->open_count++;
72713+ local_inc(&self->open_count);
72714
72715 tty->driver_data = self;
72716 self->tty = tty;
72717 spin_unlock_irqrestore(&self->spinlock, flags);
72718
72719 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
72720- self->line, self->open_count);
72721+ self->line, local_read(&self->open_count));
72722
72723 /* Not really used by us, but lets do it anyway */
72724 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
72725@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
72726 return;
72727 }
72728
72729- if ((tty->count == 1) && (self->open_count != 1)) {
72730+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
72731 /*
72732 * Uh, oh. tty->count is 1, which means that the tty
72733 * structure will be freed. state->count should always
72734@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
72735 */
72736 IRDA_DEBUG(0, "%s(), bad serial port count; "
72737 "tty->count is 1, state->count is %d\n", __func__ ,
72738- self->open_count);
72739- self->open_count = 1;
72740+ local_read(&self->open_count));
72741+ local_set(&self->open_count, 1);
72742 }
72743
72744- if (--self->open_count < 0) {
72745+ if (local_dec_return(&self->open_count) < 0) {
72746 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
72747- __func__, self->line, self->open_count);
72748- self->open_count = 0;
72749+ __func__, self->line, local_read(&self->open_count));
72750+ local_set(&self->open_count, 0);
72751 }
72752- if (self->open_count) {
72753+ if (local_read(&self->open_count)) {
72754 spin_unlock_irqrestore(&self->spinlock, flags);
72755
72756 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
72757@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
72758 tty->closing = 0;
72759 self->tty = NULL;
72760
72761- if (self->blocked_open) {
72762+ if (local_read(&self->blocked_open)) {
72763 if (self->close_delay)
72764 schedule_timeout_interruptible(self->close_delay);
72765 wake_up_interruptible(&self->open_wait);
72766@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
72767 spin_lock_irqsave(&self->spinlock, flags);
72768 self->flags &= ~ASYNC_NORMAL_ACTIVE;
72769 self->tty = NULL;
72770- self->open_count = 0;
72771+ local_set(&self->open_count, 0);
72772 spin_unlock_irqrestore(&self->spinlock, flags);
72773
72774 wake_up_interruptible(&self->open_wait);
72775@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
72776 seq_putc(m, '\n');
72777
72778 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
72779- seq_printf(m, "Open count: %d\n", self->open_count);
72780+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
72781 seq_printf(m, "Max data size: %d\n", self->max_data_size);
72782 seq_printf(m, "Max header size: %d\n", self->max_header_size);
72783
72784diff -urNp linux-2.6.32.45/net/iucv/af_iucv.c linux-2.6.32.45/net/iucv/af_iucv.c
72785--- linux-2.6.32.45/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
72786+++ linux-2.6.32.45/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
72787@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
72788
72789 write_lock_bh(&iucv_sk_list.lock);
72790
72791- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
72792+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72793 while (__iucv_get_sock_by_name(name)) {
72794 sprintf(name, "%08x",
72795- atomic_inc_return(&iucv_sk_list.autobind_name));
72796+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
72797 }
72798
72799 write_unlock_bh(&iucv_sk_list.lock);
72800diff -urNp linux-2.6.32.45/net/key/af_key.c linux-2.6.32.45/net/key/af_key.c
72801--- linux-2.6.32.45/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
72802+++ linux-2.6.32.45/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
72803@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
72804 struct xfrm_migrate m[XFRM_MAX_DEPTH];
72805 struct xfrm_kmaddress k;
72806
72807+ pax_track_stack();
72808+
72809 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
72810 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
72811 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
72812@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
72813 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
72814 else
72815 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
72816+#ifdef CONFIG_GRKERNSEC_HIDESYM
72817+ NULL,
72818+#else
72819 s,
72820+#endif
72821 atomic_read(&s->sk_refcnt),
72822 sk_rmem_alloc_get(s),
72823 sk_wmem_alloc_get(s),
72824diff -urNp linux-2.6.32.45/net/lapb/lapb_iface.c linux-2.6.32.45/net/lapb/lapb_iface.c
72825--- linux-2.6.32.45/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
72826+++ linux-2.6.32.45/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
72827@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
72828 goto out;
72829
72830 lapb->dev = dev;
72831- lapb->callbacks = *callbacks;
72832+ lapb->callbacks = callbacks;
72833
72834 __lapb_insert_cb(lapb);
72835
72836@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
72837
72838 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
72839 {
72840- if (lapb->callbacks.connect_confirmation)
72841- lapb->callbacks.connect_confirmation(lapb->dev, reason);
72842+ if (lapb->callbacks->connect_confirmation)
72843+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
72844 }
72845
72846 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
72847 {
72848- if (lapb->callbacks.connect_indication)
72849- lapb->callbacks.connect_indication(lapb->dev, reason);
72850+ if (lapb->callbacks->connect_indication)
72851+ lapb->callbacks->connect_indication(lapb->dev, reason);
72852 }
72853
72854 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
72855 {
72856- if (lapb->callbacks.disconnect_confirmation)
72857- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
72858+ if (lapb->callbacks->disconnect_confirmation)
72859+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
72860 }
72861
72862 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
72863 {
72864- if (lapb->callbacks.disconnect_indication)
72865- lapb->callbacks.disconnect_indication(lapb->dev, reason);
72866+ if (lapb->callbacks->disconnect_indication)
72867+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
72868 }
72869
72870 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
72871 {
72872- if (lapb->callbacks.data_indication)
72873- return lapb->callbacks.data_indication(lapb->dev, skb);
72874+ if (lapb->callbacks->data_indication)
72875+ return lapb->callbacks->data_indication(lapb->dev, skb);
72876
72877 kfree_skb(skb);
72878 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
72879@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
72880 {
72881 int used = 0;
72882
72883- if (lapb->callbacks.data_transmit) {
72884- lapb->callbacks.data_transmit(lapb->dev, skb);
72885+ if (lapb->callbacks->data_transmit) {
72886+ lapb->callbacks->data_transmit(lapb->dev, skb);
72887 used = 1;
72888 }
72889
72890diff -urNp linux-2.6.32.45/net/mac80211/cfg.c linux-2.6.32.45/net/mac80211/cfg.c
72891--- linux-2.6.32.45/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
72892+++ linux-2.6.32.45/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
72893@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
72894 return err;
72895 }
72896
72897-struct cfg80211_ops mac80211_config_ops = {
72898+const struct cfg80211_ops mac80211_config_ops = {
72899 .add_virtual_intf = ieee80211_add_iface,
72900 .del_virtual_intf = ieee80211_del_iface,
72901 .change_virtual_intf = ieee80211_change_iface,
72902diff -urNp linux-2.6.32.45/net/mac80211/cfg.h linux-2.6.32.45/net/mac80211/cfg.h
72903--- linux-2.6.32.45/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
72904+++ linux-2.6.32.45/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
72905@@ -4,6 +4,6 @@
72906 #ifndef __CFG_H
72907 #define __CFG_H
72908
72909-extern struct cfg80211_ops mac80211_config_ops;
72910+extern const struct cfg80211_ops mac80211_config_ops;
72911
72912 #endif /* __CFG_H */
72913diff -urNp linux-2.6.32.45/net/mac80211/debugfs_key.c linux-2.6.32.45/net/mac80211/debugfs_key.c
72914--- linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
72915+++ linux-2.6.32.45/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
72916@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
72917 size_t count, loff_t *ppos)
72918 {
72919 struct ieee80211_key *key = file->private_data;
72920- int i, res, bufsize = 2 * key->conf.keylen + 2;
72921+ int i, bufsize = 2 * key->conf.keylen + 2;
72922 char *buf = kmalloc(bufsize, GFP_KERNEL);
72923 char *p = buf;
72924+ ssize_t res;
72925+
72926+ if (buf == NULL)
72927+ return -ENOMEM;
72928
72929 for (i = 0; i < key->conf.keylen; i++)
72930 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
72931diff -urNp linux-2.6.32.45/net/mac80211/debugfs_sta.c linux-2.6.32.45/net/mac80211/debugfs_sta.c
72932--- linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
72933+++ linux-2.6.32.45/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
72934@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
72935 int i;
72936 struct sta_info *sta = file->private_data;
72937
72938+ pax_track_stack();
72939+
72940 spin_lock_bh(&sta->lock);
72941 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
72942 sta->ampdu_mlme.dialog_token_allocator + 1);
72943diff -urNp linux-2.6.32.45/net/mac80211/ieee80211_i.h linux-2.6.32.45/net/mac80211/ieee80211_i.h
72944--- linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
72945+++ linux-2.6.32.45/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
72946@@ -25,6 +25,7 @@
72947 #include <linux/etherdevice.h>
72948 #include <net/cfg80211.h>
72949 #include <net/mac80211.h>
72950+#include <asm/local.h>
72951 #include "key.h"
72952 #include "sta_info.h"
72953
72954@@ -635,7 +636,7 @@ struct ieee80211_local {
72955 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
72956 spinlock_t queue_stop_reason_lock;
72957
72958- int open_count;
72959+ local_t open_count;
72960 int monitors, cooked_mntrs;
72961 /* number of interfaces with corresponding FIF_ flags */
72962 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
72963diff -urNp linux-2.6.32.45/net/mac80211/iface.c linux-2.6.32.45/net/mac80211/iface.c
72964--- linux-2.6.32.45/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
72965+++ linux-2.6.32.45/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
72966@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
72967 break;
72968 }
72969
72970- if (local->open_count == 0) {
72971+ if (local_read(&local->open_count) == 0) {
72972 res = drv_start(local);
72973 if (res)
72974 goto err_del_bss;
72975@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
72976 * Validate the MAC address for this device.
72977 */
72978 if (!is_valid_ether_addr(dev->dev_addr)) {
72979- if (!local->open_count)
72980+ if (!local_read(&local->open_count))
72981 drv_stop(local);
72982 return -EADDRNOTAVAIL;
72983 }
72984@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
72985
72986 hw_reconf_flags |= __ieee80211_recalc_idle(local);
72987
72988- local->open_count++;
72989+ local_inc(&local->open_count);
72990 if (hw_reconf_flags) {
72991 ieee80211_hw_config(local, hw_reconf_flags);
72992 /*
72993@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
72994 err_del_interface:
72995 drv_remove_interface(local, &conf);
72996 err_stop:
72997- if (!local->open_count)
72998+ if (!local_read(&local->open_count))
72999 drv_stop(local);
73000 err_del_bss:
73001 sdata->bss = NULL;
73002@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
73003 WARN_ON(!list_empty(&sdata->u.ap.vlans));
73004 }
73005
73006- local->open_count--;
73007+ local_dec(&local->open_count);
73008
73009 switch (sdata->vif.type) {
73010 case NL80211_IFTYPE_AP_VLAN:
73011@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
73012
73013 ieee80211_recalc_ps(local, -1);
73014
73015- if (local->open_count == 0) {
73016+ if (local_read(&local->open_count) == 0) {
73017 ieee80211_clear_tx_pending(local);
73018 ieee80211_stop_device(local);
73019
73020diff -urNp linux-2.6.32.45/net/mac80211/main.c linux-2.6.32.45/net/mac80211/main.c
73021--- linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
73022+++ linux-2.6.32.45/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
73023@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
73024 local->hw.conf.power_level = power;
73025 }
73026
73027- if (changed && local->open_count) {
73028+ if (changed && local_read(&local->open_count)) {
73029 ret = drv_config(local, changed);
73030 /*
73031 * Goal:
73032diff -urNp linux-2.6.32.45/net/mac80211/mlme.c linux-2.6.32.45/net/mac80211/mlme.c
73033--- linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:35:30.000000000 -0400
73034+++ linux-2.6.32.45/net/mac80211/mlme.c 2011-08-09 18:34:01.000000000 -0400
73035@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
73036 bool have_higher_than_11mbit = false, newsta = false;
73037 u16 ap_ht_cap_flags;
73038
73039+ pax_track_stack();
73040+
73041 /*
73042 * AssocResp and ReassocResp have identical structure, so process both
73043 * of them in this function.
73044diff -urNp linux-2.6.32.45/net/mac80211/pm.c linux-2.6.32.45/net/mac80211/pm.c
73045--- linux-2.6.32.45/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
73046+++ linux-2.6.32.45/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
73047@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
73048 }
73049
73050 /* stop hardware - this must stop RX */
73051- if (local->open_count)
73052+ if (local_read(&local->open_count))
73053 ieee80211_stop_device(local);
73054
73055 local->suspended = true;
73056diff -urNp linux-2.6.32.45/net/mac80211/rate.c linux-2.6.32.45/net/mac80211/rate.c
73057--- linux-2.6.32.45/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
73058+++ linux-2.6.32.45/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
73059@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73060 struct rate_control_ref *ref, *old;
73061
73062 ASSERT_RTNL();
73063- if (local->open_count)
73064+ if (local_read(&local->open_count))
73065 return -EBUSY;
73066
73067 ref = rate_control_alloc(name, local);
73068diff -urNp linux-2.6.32.45/net/mac80211/tx.c linux-2.6.32.45/net/mac80211/tx.c
73069--- linux-2.6.32.45/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
73070+++ linux-2.6.32.45/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
73071@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
73072 return cpu_to_le16(dur);
73073 }
73074
73075-static int inline is_ieee80211_device(struct ieee80211_local *local,
73076+static inline int is_ieee80211_device(struct ieee80211_local *local,
73077 struct net_device *dev)
73078 {
73079 return local == wdev_priv(dev->ieee80211_ptr);
73080diff -urNp linux-2.6.32.45/net/mac80211/util.c linux-2.6.32.45/net/mac80211/util.c
73081--- linux-2.6.32.45/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
73082+++ linux-2.6.32.45/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
73083@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
73084 local->resuming = true;
73085
73086 /* restart hardware */
73087- if (local->open_count) {
73088+ if (local_read(&local->open_count)) {
73089 /*
73090 * Upon resume hardware can sometimes be goofy due to
73091 * various platform / driver / bus issues, so restarting
73092diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c
73093--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
73094+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
73095@@ -564,7 +564,7 @@ static const struct file_operations ip_v
73096 .open = ip_vs_app_open,
73097 .read = seq_read,
73098 .llseek = seq_lseek,
73099- .release = seq_release,
73100+ .release = seq_release_net,
73101 };
73102 #endif
73103
73104diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c
73105--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
73106+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
73107@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73108 /* if the connection is not template and is created
73109 * by sync, preserve the activity flag.
73110 */
73111- cp->flags |= atomic_read(&dest->conn_flags) &
73112+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
73113 (~IP_VS_CONN_F_INACTIVE);
73114 else
73115- cp->flags |= atomic_read(&dest->conn_flags);
73116+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
73117 cp->dest = dest;
73118
73119 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
73120@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
73121 atomic_set(&cp->refcnt, 1);
73122
73123 atomic_set(&cp->n_control, 0);
73124- atomic_set(&cp->in_pkts, 0);
73125+ atomic_set_unchecked(&cp->in_pkts, 0);
73126
73127 atomic_inc(&ip_vs_conn_count);
73128 if (flags & IP_VS_CONN_F_NO_CPORT)
73129@@ -871,7 +871,7 @@ static const struct file_operations ip_v
73130 .open = ip_vs_conn_open,
73131 .read = seq_read,
73132 .llseek = seq_lseek,
73133- .release = seq_release,
73134+ .release = seq_release_net,
73135 };
73136
73137 static const char *ip_vs_origin_name(unsigned flags)
73138@@ -934,7 +934,7 @@ static const struct file_operations ip_v
73139 .open = ip_vs_conn_sync_open,
73140 .read = seq_read,
73141 .llseek = seq_lseek,
73142- .release = seq_release,
73143+ .release = seq_release_net,
73144 };
73145
73146 #endif
73147@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
73148
73149 /* Don't drop the entry if its number of incoming packets is not
73150 located in [0, 8] */
73151- i = atomic_read(&cp->in_pkts);
73152+ i = atomic_read_unchecked(&cp->in_pkts);
73153 if (i > 8 || i < 0) return 0;
73154
73155 if (!todrop_rate[i]) return 0;
73156diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c
73157--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
73158+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
73159@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73160 ret = cp->packet_xmit(skb, cp, pp);
73161 /* do not touch skb anymore */
73162
73163- atomic_inc(&cp->in_pkts);
73164+ atomic_inc_unchecked(&cp->in_pkts);
73165 ip_vs_conn_put(cp);
73166 return ret;
73167 }
73168@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73169 * Sync connection if it is about to close to
73170 * encorage the standby servers to update the connections timeout
73171 */
73172- pkts = atomic_add_return(1, &cp->in_pkts);
73173+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73174 if (af == AF_INET &&
73175 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
73176 (((cp->protocol != IPPROTO_TCP ||
73177diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c
73178--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
73179+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
73180@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
73181 ip_vs_rs_hash(dest);
73182 write_unlock_bh(&__ip_vs_rs_lock);
73183 }
73184- atomic_set(&dest->conn_flags, conn_flags);
73185+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73186
73187 /* bind the service */
73188 if (!dest->svc) {
73189@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
73190 " %-7s %-6d %-10d %-10d\n",
73191 &dest->addr.in6,
73192 ntohs(dest->port),
73193- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73194+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73195 atomic_read(&dest->weight),
73196 atomic_read(&dest->activeconns),
73197 atomic_read(&dest->inactconns));
73198@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
73199 "%-7s %-6d %-10d %-10d\n",
73200 ntohl(dest->addr.ip),
73201 ntohs(dest->port),
73202- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73203+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73204 atomic_read(&dest->weight),
73205 atomic_read(&dest->activeconns),
73206 atomic_read(&dest->inactconns));
73207@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
73208 .open = ip_vs_info_open,
73209 .read = seq_read,
73210 .llseek = seq_lseek,
73211- .release = seq_release_private,
73212+ .release = seq_release_net,
73213 };
73214
73215 #endif
73216@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
73217 .open = ip_vs_stats_seq_open,
73218 .read = seq_read,
73219 .llseek = seq_lseek,
73220- .release = single_release,
73221+ .release = single_release_net,
73222 };
73223
73224 #endif
73225@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
73226
73227 entry.addr = dest->addr.ip;
73228 entry.port = dest->port;
73229- entry.conn_flags = atomic_read(&dest->conn_flags);
73230+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73231 entry.weight = atomic_read(&dest->weight);
73232 entry.u_threshold = dest->u_threshold;
73233 entry.l_threshold = dest->l_threshold;
73234@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
73235 unsigned char arg[128];
73236 int ret = 0;
73237
73238+ pax_track_stack();
73239+
73240 if (!capable(CAP_NET_ADMIN))
73241 return -EPERM;
73242
73243@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
73244 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73245
73246 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73247- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73248+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73249 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73250 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73251 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73252diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c
73253--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
73254+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
73255@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
73256
73257 if (opt)
73258 memcpy(&cp->in_seq, opt, sizeof(*opt));
73259- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73260+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73261 cp->state = state;
73262 cp->old_state = cp->state;
73263 /*
73264diff -urNp linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c
73265--- linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
73266+++ linux-2.6.32.45/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
73267@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73268 else
73269 rc = NF_ACCEPT;
73270 /* do not touch skb anymore */
73271- atomic_inc(&cp->in_pkts);
73272+ atomic_inc_unchecked(&cp->in_pkts);
73273 goto out;
73274 }
73275
73276@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73277 else
73278 rc = NF_ACCEPT;
73279 /* do not touch skb anymore */
73280- atomic_inc(&cp->in_pkts);
73281+ atomic_inc_unchecked(&cp->in_pkts);
73282 goto out;
73283 }
73284
73285diff -urNp linux-2.6.32.45/net/netfilter/Kconfig linux-2.6.32.45/net/netfilter/Kconfig
73286--- linux-2.6.32.45/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
73287+++ linux-2.6.32.45/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
73288@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
73289
73290 To compile it as a module, choose M here. If unsure, say N.
73291
73292+config NETFILTER_XT_MATCH_GRADM
73293+ tristate '"gradm" match support'
73294+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73295+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73296+ ---help---
72297+ The gradm match allows matching on whether the grsecurity RBAC
72298+ system is enabled. It is useful when iptables rules are applied
72299+ early during boot to block connections to the machine (except
72300+ from a trusted host) while the RBAC system is still disabled.
73301+
73302 config NETFILTER_XT_MATCH_HASHLIMIT
73303 tristate '"hashlimit" match support'
73304 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
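For context, a minimal sketch of what a 2.6.32-era Xtables match with this behaviour could look like. The xt_gradm_mtinfo layout, the invflags handling and the gr_acl_is_enabled() helper are assumptions made for illustration only; the actual xt_gradm.c added by this patch appears further below.

/*
 * Illustrative sketch (not the patch's xt_gradm.c): an Xtables match
 * for the 2.6.32 API that matches while a global "RBAC enabled" flag
 * is set.  gr_acl_is_enabled() is assumed to be provided elsewhere
 * by grsecurity; the matchinfo layout below is hypothetical.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

struct xt_gradm_mtinfo {
	__u16 flags;
	__u16 invflags;	/* non-zero: invert the result */
};

static bool gradm_mt(const struct sk_buff *skb,
		     const struct xt_match_param *par)
{
	const struct xt_gradm_mtinfo *info = par->matchinfo;
	bool enabled = gr_acl_is_enabled();

	/* match while RBAC is enabled, or the opposite if inverted */
	return info->invflags ? !enabled : enabled;
}

static struct xt_match gradm_mt_reg __read_mostly = {
	.name      = "gradm",
	.revision  = 0,
	.family    = NFPROTO_UNSPEC,
	.match     = gradm_mt,
	.matchsize = sizeof(struct xt_gradm_mtinfo),
	.me        = THIS_MODULE,
};

static int __init gradm_mt_init(void)
{
	return xt_register_match(&gradm_mt_reg);
}

static void __exit gradm_mt_exit(void)
{
	xt_unregister_match(&gradm_mt_reg);
}

module_init(gradm_mt_init);
module_exit(gradm_mt_exit);
MODULE_LICENSE("GPL");

With such a module loaded, an early-boot firewall script could reference the match by name (-m gradm) so that incoming traffic is dropped until the RBAC policy has been enabled, which is the scenario the Kconfig help text above describes.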
73305diff -urNp linux-2.6.32.45/net/netfilter/Makefile linux-2.6.32.45/net/netfilter/Makefile
73306--- linux-2.6.32.45/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
73307+++ linux-2.6.32.45/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
73308@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
73309 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73310 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73311 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73312+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73313 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73314 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73315 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73316diff -urNp linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c
73317--- linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
73318+++ linux-2.6.32.45/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
73319@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
73320 static int
73321 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73322 struct nf_conntrack_tuple *tuple,
73323- enum ctattr_tuple type, u_int8_t l3num)
73324+ enum ctattr_type type, u_int8_t l3num)
73325 {
73326 struct nlattr *tb[CTA_TUPLE_MAX+1];
73327 int err;
73328diff -urNp linux-2.6.32.45/net/netfilter/nfnetlink_log.c linux-2.6.32.45/net/netfilter/nfnetlink_log.c
73329--- linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73330+++ linux-2.6.32.45/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73331@@ -68,7 +68,7 @@ struct nfulnl_instance {
73332 };
73333
73334 static DEFINE_RWLOCK(instances_lock);
73335-static atomic_t global_seq;
73336+static atomic_unchecked_t global_seq;
73337
73338 #define INSTANCE_BUCKETS 16
73339 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73340@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73341 /* global sequence number */
73342 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73343 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73344- htonl(atomic_inc_return(&global_seq)));
73345+ htonl(atomic_inc_return_unchecked(&global_seq)));
73346
73347 if (data_len) {
73348 struct nlattr *nla;
73349diff -urNp linux-2.6.32.45/net/netfilter/xt_gradm.c linux-2.6.32.45/net/netfilter/xt_gradm.c
73350--- linux-2.6.32.45/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73351+++ linux-2.6.32.45/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73352@@ -0,0 +1,51 @@
73353+/*
73354+ * gradm match for netfilter
73355